##// END OF EJS Templates
Always remove appendopener tmp files (fixes issue235)....
Thomas Arendsen Hein -
r2232:ef3c039e default
parent child Browse files
Show More
@@ -1,156 +1,162 b''
1 # appendfile.py - special classes to make repo updates atomic
1 # appendfile.py - special classes to make repo updates atomic
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from demandload import *
8 from demandload import *
9 demandload(globals(), "cStringIO changelog errno manifest os tempfile util")
9 demandload(globals(), "cStringIO changelog errno manifest os tempfile util")
10
10
11 # writes to metadata files are ordered. reads: changelog, manifest,
11 # writes to metadata files are ordered. reads: changelog, manifest,
12 # normal files. writes: normal files, manifest, changelog.
12 # normal files. writes: normal files, manifest, changelog.
13
13
14 # manifest contains pointers to offsets in normal files. changelog
14 # manifest contains pointers to offsets in normal files. changelog
15 # contains pointers to offsets in manifest. if reader reads old
15 # contains pointers to offsets in manifest. if reader reads old
16 # changelog while manifest or normal files are written, it has no
16 # changelog while manifest or normal files are written, it has no
17 # pointers into new parts of those files that are maybe not consistent
17 # pointers into new parts of those files that are maybe not consistent
18 # yet, so will not read them.
18 # yet, so will not read them.
19
19
20 # localrepo.addchangegroup thinks it writes changelog first, then
20 # localrepo.addchangegroup thinks it writes changelog first, then
21 # manifest, then normal files (this is order they are available, and
21 # manifest, then normal files (this is order they are available, and
22 # needed for computing linkrev fields), but uses appendfile to hide
22 # needed for computing linkrev fields), but uses appendfile to hide
23 # updates from readers. data not written to manifest or changelog
23 # updates from readers. data not written to manifest or changelog
24 # until all normal files updated. write manifest first, then
24 # until all normal files updated. write manifest first, then
25 # changelog.
25 # changelog.
26
26
27 # with this write ordering, readers cannot see inconsistent view of
27 # with this write ordering, readers cannot see inconsistent view of
28 # repo during update.
28 # repo during update.
29
29
class appendfile(object):
    '''implement enough of file protocol to append to revlog file.
    appended data is written to temp file. reads and seeks span real
    file and temp file. readers cannot see appended data until
    writedata called.'''

    def __init__(self, fp, tmpname):
        # tmpname lets several appendfile instances for the same real
        # file share one temp file (appendopener passes it back in).
        if tmpname:
            self.tmpname = tmpname
            self.tmpfp = util.posixfile(self.tmpname, 'ab+')
        else:
            fd, self.tmpname = tempfile.mkstemp(prefix="hg-appendfile-")
            os.close(fd)
            self.tmpfp = util.posixfile(self.tmpname, 'ab+')
        self.realfp = fp
        # virtual offset into real-file + temp-file concatenation
        self.offset = fp.tell()
        # real file is not written by anyone else. cache its size so
        # seek and read can be fast.
        self.realsize = util.fstat(fp).st_size
        self.name = fp.name

    def end(self):
        '''return total virtual size (real file plus appended data).'''
        self.tmpfp.flush() # make sure the stat is correct
        return self.realsize + util.fstat(self.tmpfp).st_size

    def tell(self):
        return self.offset

    def flush(self):
        self.tmpfp.flush()

    def close(self):
        self.realfp.close()
        self.tmpfp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and temp file.'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset

        # position whichever underlying file the virtual offset lands in
        if self.offset < self.realsize:
            self.realfp.seek(self.offset)
        else:
            self.tmpfp.seek(self.offset - self.realsize)

    def read(self, count=-1):
        '''only trick here is reads that span real file and temp file.'''
        fp = cStringIO.StringIO()
        old_offset = self.offset
        if self.offset < self.realsize:
            # first drain what the real file can supply
            s = self.realfp.read(count)
            fp.write(s)
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            # remainder (or unbounded rest) comes from the temp file;
            # reseek only if the real-file read moved the offset
            if old_offset != self.offset:
                self.tmpfp.seek(self.offset - self.realsize)
            s = self.tmpfp.read(count)
            fp.write(s)
            self.offset += len(s)
        return fp.getvalue()

    def write(self, s):
        '''append to temp file.'''
        self.tmpfp.seek(0, 2)
        self.tmpfp.write(s)
        # all writes are appends, so offset must go to end of file.
        self.offset = self.realsize + self.tmpfp.tell()
103
103
class appendopener(object):
    '''special opener for files that only read or append.'''

    def __init__(self, opener):
        self.realopener = opener
        # key: file name, value: appendfile name
        self.tmpnames = {}

    def __call__(self, name, mode='r'):
        '''open file.'''

        # only read/append access is supported by appendfile
        assert mode in 'ra+'
        try:
            realfp = self.realopener(name, 'r')
        except IOError, err:
            if err.errno != errno.ENOENT: raise
            # file does not exist yet: create it empty
            realfp = self.realopener(name, 'w+')
        # reuse an existing temp file for this name so appended data
        # accumulates across repeated opens
        tmpname = self.tmpnames.get(name)
        fp = appendfile(realfp, tmpname)
        if tmpname is None:
            self.tmpnames[name] = fp.tmpname
        return fp

    def writedata(self):
        '''copy data from temp files to real files.'''
        # write .d file before .i file.
        tmpnames = self.tmpnames.items()
        tmpnames.sort()
        for name, tmpname in tmpnames:
            fp = open(tmpname, 'rb')
            s = fp.read()
            fp.close()
            os.unlink(tmpname)
            fp = self.realopener(name, 'a')
            fp.write(s)
            fp.close()

    def cleanup(self):
        '''delete temp files (this discards unwritten data!)'''
        # NOTE(review): writedata() above unlinks temp files but leaves
        # their entries in self.tmpnames, so calling cleanup() after a
        # successful writedata() would unlink already-removed paths —
        # presumably callers invoke exactly one of the two; confirm.
        for tmpname in self.tmpnames.values():
            os.unlink(tmpname)
146
141 # files for changelog and manifest are in different appendopeners, so
147 # files for changelog and manifest are in different appendopeners, so
142 # not mixed up together.
148 # not mixed up together.
143
149
class appendchangelog(changelog.changelog, appendopener):
    # changelog variant that routes its own I/O through an appendopener,
    # so updates stay invisible to readers until writedata() is called.
    def __init__(self, opener, version):
        appendopener.__init__(self, opener)
        # pass self as the opener: changelog reads/writes go through us
        changelog.changelog.__init__(self, self, version)
    def checkinlinesize(self, fp, tr):
        # disable inline-to-split migration while appending; the real
        # file must not be rewritten behind readers' backs
        return
150
156
class appendmanifest(manifest.manifest, appendopener):
    # manifest variant that routes its own I/O through an appendopener,
    # so updates stay invisible to readers until writedata() is called.
    def __init__(self, opener, version):
        appendopener.__init__(self, opener)
        # pass self as the opener: manifest reads/writes go through us
        manifest.manifest.__init__(self, self, version)
    def checkinlinesize(self, fp, tr):
        # disable inline-to-split migration while appending; the real
        # file must not be rewritten behind readers' backs
        return
@@ -1,2078 +1,2089 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, util
8 import os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "revlog traceback")
15 demandload(globals(), "revlog traceback")
16
16
17 class localrepository(object):
17 class localrepository(object):
    def __del__(self):
        # drop the transaction handle at finalization so the repo and a
        # pending transaction object do not keep each other alive
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        # no explicit path: walk upward from cwd looking for a ".hg" dir
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached filesystem root without finding a repo
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener for .hg metadata, wopener for the working directory
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # per-repo hgrc is optional
            pass

        # determine revlog format version and feature flags from config
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; see tags()/nodetags()/wread()/wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80
80
    def hook(self, name, throw=False, **args):
        '''run all hooks configured for *name*; returns true if any
        hook failed. with throw=True a failure raises util.Abort.'''
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # walk dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # user interrupts always propagate
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                if self.ui.traceback:
                    traceback.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            '''run a shell-command hook; hook args are exported as
            environment variables (both HG_-prefixed and plain).'''
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
                       [(k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # select hooks whose name (before any ".suffix") matches
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
159
159
160 def tags(self):
160 def tags(self):
161 '''return a mapping of tag to node'''
161 '''return a mapping of tag to node'''
162 if not self.tagscache:
162 if not self.tagscache:
163 self.tagscache = {}
163 self.tagscache = {}
164
164
165 def parsetag(line, context):
165 def parsetag(line, context):
166 if not line:
166 if not line:
167 return
167 return
168 s = l.split(" ", 1)
168 s = l.split(" ", 1)
169 if len(s) != 2:
169 if len(s) != 2:
170 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
170 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
171 return
171 return
172 node, key = s
172 node, key = s
173 try:
173 try:
174 bin_n = bin(node)
174 bin_n = bin(node)
175 except TypeError:
175 except TypeError:
176 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
176 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
177 return
177 return
178 if bin_n not in self.changelog.nodemap:
178 if bin_n not in self.changelog.nodemap:
179 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
179 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
180 return
180 return
181 self.tagscache[key.strip()] = bin_n
181 self.tagscache[key.strip()] = bin_n
182
182
183 # read each head of the tags file, ending with the tip
183 # read each head of the tags file, ending with the tip
184 # and add each tag found to the map, with "newer" ones
184 # and add each tag found to the map, with "newer" ones
185 # taking precedence
185 # taking precedence
186 fl = self.file(".hgtags")
186 fl = self.file(".hgtags")
187 h = fl.heads()
187 h = fl.heads()
188 h.reverse()
188 h.reverse()
189 for r in h:
189 for r in h:
190 count = 0
190 count = 0
191 for l in fl.read(r).splitlines():
191 for l in fl.read(r).splitlines():
192 count += 1
192 count += 1
193 parsetag(l, ".hgtags:%d" % count)
193 parsetag(l, ".hgtags:%d" % count)
194
194
195 try:
195 try:
196 f = self.opener("localtags")
196 f = self.opener("localtags")
197 count = 0
197 count = 0
198 for l in f:
198 for l in f:
199 count += 1
199 count += 1
200 parsetag(l, "localtags:%d" % count)
200 parsetag(l, "localtags:%d" % count)
201 except IOError:
201 except IOError:
202 pass
202 pass
203
203
204 self.tagscache['tip'] = self.changelog.tip()
204 self.tagscache['tip'] = self.changelog.tip()
205
205
206 return self.tagscache
206 return self.tagscache
207
207
208 def tagslist(self):
208 def tagslist(self):
209 '''return a list of tags ordered by revision'''
209 '''return a list of tags ordered by revision'''
210 l = []
210 l = []
211 for t, n in self.tags().items():
211 for t, n in self.tags().items():
212 try:
212 try:
213 r = self.changelog.rev(n)
213 r = self.changelog.rev(n)
214 except:
214 except:
215 r = -2 # sort to the beginning of the list if unknown
215 r = -2 # sort to the beginning of the list if unknown
216 l.append((r, t, n))
216 l.append((r, t, n))
217 l.sort()
217 l.sort()
218 return [(t, n) for r, t, n in l]
218 return [(t, n) for r, t, n in l]
219
219
220 def nodetags(self, node):
220 def nodetags(self, node):
221 '''return the tags associated with a node'''
221 '''return the tags associated with a node'''
222 if not self.nodetagscache:
222 if not self.nodetagscache:
223 self.nodetagscache = {}
223 self.nodetagscache = {}
224 for t, n in self.tags().items():
224 for t, n in self.tags().items():
225 self.nodetagscache.setdefault(n, []).append(t)
225 self.nodetagscache.setdefault(n, []).append(t)
226 return self.nodetagscache.get(node, [])
226 return self.nodetagscache.get(node, [])
227
227
    def lookup(self, key):
        '''resolve a tag name or changelog identifier to a node;
        raises repo.RepoError if nothing matches.'''
        try:
            # tags take precedence over changelog identifiers
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # NOTE(review): bare except deliberately converts any
                # lookup failure into RepoError — kept as-is
                raise repo.RepoError(_("unknown revision '%s'") % key)
236
236
    def dev(self):
        # st_dev of the .hg directory (identifies its filesystem/device)
        return os.stat(self.path).st_dev
239
239
    def local(self):
        # this repository class is local/on-disk (unlike remote repos)
        return True
242
242
    def join(self, f):
        # path of f inside the .hg metadata directory
        return os.path.join(self.path, f)
245
245
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
248
248
    def file(self, f):
        # return the filelog for tracked file f; a leading '/' is
        # stripped so absolute-style names map into the store
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f, self.revlogversion)
253
253
    def getcwd(self):
        # current working directory relative to the repo root,
        # as tracked by the dirstate
        return self.dirstate.getcwd()
256
256
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
259
259
260 def wread(self, filename):
260 def wread(self, filename):
261 if self.encodepats == None:
261 if self.encodepats == None:
262 l = []
262 l = []
263 for pat, cmd in self.ui.configitems("encode"):
263 for pat, cmd in self.ui.configitems("encode"):
264 mf = util.matcher(self.root, "", [pat], [], [])[1]
264 mf = util.matcher(self.root, "", [pat], [], [])[1]
265 l.append((mf, cmd))
265 l.append((mf, cmd))
266 self.encodepats = l
266 self.encodepats = l
267
267
268 data = self.wopener(filename, 'r').read()
268 data = self.wopener(filename, 'r').read()
269
269
270 for mf, cmd in self.encodepats:
270 for mf, cmd in self.encodepats:
271 if mf(filename):
271 if mf(filename):
272 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
272 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
273 data = util.filter(data, cmd)
273 data = util.filter(data, cmd)
274 break
274 break
275
275
276 return data
276 return data
277
277
278 def wwrite(self, filename, data, fd=None):
278 def wwrite(self, filename, data, fd=None):
279 if self.decodepats == None:
279 if self.decodepats == None:
280 l = []
280 l = []
281 for pat, cmd in self.ui.configitems("decode"):
281 for pat, cmd in self.ui.configitems("decode"):
282 mf = util.matcher(self.root, "", [pat], [], [])[1]
282 mf = util.matcher(self.root, "", [pat], [], [])[1]
283 l.append((mf, cmd))
283 l.append((mf, cmd))
284 self.decodepats = l
284 self.decodepats = l
285
285
286 for mf, cmd in self.decodepats:
286 for mf, cmd in self.decodepats:
287 if mf(filename):
287 if mf(filename):
288 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
288 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
289 data = util.filter(data, cmd)
289 data = util.filter(data, cmd)
290 break
290 break
291
291
292 if fd:
292 if fd:
293 return fd.write(data)
293 return fd.write(data)
294 return self.wopener(filename, 'w').write(data)
294 return self.wopener(filename, 'w').write(data)
295
295
    def transaction(self):
        '''start (or nest into) a repository transaction.'''
        # reuse a running transaction instead of starting a second one
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
313
313
    def recover(self):
        '''roll back an interrupted transaction, if one exists.
        returns True if something was recovered.'''
        # hold the repo lock while rolling back the journal
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # in-memory state is stale after rollback
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
324
324
    def undo(self, wlock=None):
        '''roll back the last committed transaction ("undo" journal),
        restoring both store and dirstate.'''
        # wlock must be taken before lock (standard lock ordering)
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # reload both store and working-directory state
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
337
337
    def wreload(self):
        # re-read working-directory state from disk
        self.dirstate.read()
340
340
    def reload(self):
        # re-read store state from disk and drop caches derived from it
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
346
346
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        '''acquire the named lock. with wait set, retries with a
        timeout instead of raising LockHeld immediately.'''
        try:
            # first attempt: non-blocking (timeout 0)
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            # e.g. reload state now that we own the lock
            acquirefn()
        return l
363
363
    def lock(self, wait=1):
        # store lock; reload store state once acquired
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
367
367
    def wlock(self, wait=1):
        # working-directory lock; dirstate is written on release and
        # re-read on acquisition
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
372
372
373 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
373 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
374 "determine whether a new filenode is needed"
374 "determine whether a new filenode is needed"
375 fp1 = manifest1.get(filename, nullid)
375 fp1 = manifest1.get(filename, nullid)
376 fp2 = manifest2.get(filename, nullid)
376 fp2 = manifest2.get(filename, nullid)
377
377
378 if fp2 != nullid:
378 if fp2 != nullid:
379 # is one parent an ancestor of the other?
379 # is one parent an ancestor of the other?
380 fpa = filelog.ancestor(fp1, fp2)
380 fpa = filelog.ancestor(fp1, fp2)
381 if fpa == fp1:
381 if fpa == fp1:
382 fp1, fp2 = fp2, nullid
382 fp1, fp2 = fp2, nullid
383 elif fpa == fp2:
383 elif fpa == fp2:
384 fp2 = nullid
384 fp2 = nullid
385
385
386 # is the file unmodified from the parent? report existing entry
386 # is the file unmodified from the parent? report existing entry
387 if fp2 == nullid and text == filelog.read(fp1):
387 if fp2 == nullid and text == filelog.read(fp1):
388 return (fp1, None, None)
388 return (fp1, None, None)
389
389
390 return (None, fp1, fp2)
390 return (None, fp1, fp2)
391
391
392 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
392 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
393 orig_parent = self.dirstate.parents()[0] or nullid
393 orig_parent = self.dirstate.parents()[0] or nullid
394 p1 = p1 or self.dirstate.parents()[0] or nullid
394 p1 = p1 or self.dirstate.parents()[0] or nullid
395 p2 = p2 or self.dirstate.parents()[1] or nullid
395 p2 = p2 or self.dirstate.parents()[1] or nullid
396 c1 = self.changelog.read(p1)
396 c1 = self.changelog.read(p1)
397 c2 = self.changelog.read(p2)
397 c2 = self.changelog.read(p2)
398 m1 = self.manifest.read(c1[0])
398 m1 = self.manifest.read(c1[0])
399 mf1 = self.manifest.readflags(c1[0])
399 mf1 = self.manifest.readflags(c1[0])
400 m2 = self.manifest.read(c2[0])
400 m2 = self.manifest.read(c2[0])
401 changed = []
401 changed = []
402
402
403 if orig_parent == p1:
403 if orig_parent == p1:
404 update_dirstate = 1
404 update_dirstate = 1
405 else:
405 else:
406 update_dirstate = 0
406 update_dirstate = 0
407
407
408 if not wlock:
408 if not wlock:
409 wlock = self.wlock()
409 wlock = self.wlock()
410 l = self.lock()
410 l = self.lock()
411 tr = self.transaction()
411 tr = self.transaction()
412 mm = m1.copy()
412 mm = m1.copy()
413 mfm = mf1.copy()
413 mfm = mf1.copy()
414 linkrev = self.changelog.count()
414 linkrev = self.changelog.count()
415 for f in files:
415 for f in files:
416 try:
416 try:
417 t = self.wread(f)
417 t = self.wread(f)
418 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
418 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
419 r = self.file(f)
419 r = self.file(f)
420 mfm[f] = tm
420 mfm[f] = tm
421
421
422 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
422 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
423 if entry:
423 if entry:
424 mm[f] = entry
424 mm[f] = entry
425 continue
425 continue
426
426
427 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
427 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
428 changed.append(f)
428 changed.append(f)
429 if update_dirstate:
429 if update_dirstate:
430 self.dirstate.update([f], "n")
430 self.dirstate.update([f], "n")
431 except IOError:
431 except IOError:
432 try:
432 try:
433 del mm[f]
433 del mm[f]
434 del mfm[f]
434 del mfm[f]
435 if update_dirstate:
435 if update_dirstate:
436 self.dirstate.forget([f])
436 self.dirstate.forget([f])
437 except:
437 except:
438 # deleted from p2?
438 # deleted from p2?
439 pass
439 pass
440
440
441 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
441 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
442 user = user or self.ui.username()
442 user = user or self.ui.username()
443 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
443 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
444 tr.close()
444 tr.close()
445 if update_dirstate:
445 if update_dirstate:
446 self.dirstate.setparents(n, nullid)
446 self.dirstate.setparents(n, nullid)
447
447
448 def commit(self, files=None, text="", user=None, date=None,
448 def commit(self, files=None, text="", user=None, date=None,
449 match=util.always, force=False, lock=None, wlock=None):
449 match=util.always, force=False, lock=None, wlock=None):
450 commit = []
450 commit = []
451 remove = []
451 remove = []
452 changed = []
452 changed = []
453
453
454 if files:
454 if files:
455 for f in files:
455 for f in files:
456 s = self.dirstate.state(f)
456 s = self.dirstate.state(f)
457 if s in 'nmai':
457 if s in 'nmai':
458 commit.append(f)
458 commit.append(f)
459 elif s == 'r':
459 elif s == 'r':
460 remove.append(f)
460 remove.append(f)
461 else:
461 else:
462 self.ui.warn(_("%s not tracked!\n") % f)
462 self.ui.warn(_("%s not tracked!\n") % f)
463 else:
463 else:
464 modified, added, removed, deleted, unknown = self.changes(match=match)
464 modified, added, removed, deleted, unknown = self.changes(match=match)
465 commit = modified + added
465 commit = modified + added
466 remove = removed
466 remove = removed
467
467
468 p1, p2 = self.dirstate.parents()
468 p1, p2 = self.dirstate.parents()
469 c1 = self.changelog.read(p1)
469 c1 = self.changelog.read(p1)
470 c2 = self.changelog.read(p2)
470 c2 = self.changelog.read(p2)
471 m1 = self.manifest.read(c1[0])
471 m1 = self.manifest.read(c1[0])
472 mf1 = self.manifest.readflags(c1[0])
472 mf1 = self.manifest.readflags(c1[0])
473 m2 = self.manifest.read(c2[0])
473 m2 = self.manifest.read(c2[0])
474
474
475 if not commit and not remove and not force and p2 == nullid:
475 if not commit and not remove and not force and p2 == nullid:
476 self.ui.status(_("nothing changed\n"))
476 self.ui.status(_("nothing changed\n"))
477 return None
477 return None
478
478
479 xp1 = hex(p1)
479 xp1 = hex(p1)
480 if p2 == nullid: xp2 = ''
480 if p2 == nullid: xp2 = ''
481 else: xp2 = hex(p2)
481 else: xp2 = hex(p2)
482
482
483 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
483 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
484
484
485 if not wlock:
485 if not wlock:
486 wlock = self.wlock()
486 wlock = self.wlock()
487 if not lock:
487 if not lock:
488 lock = self.lock()
488 lock = self.lock()
489 tr = self.transaction()
489 tr = self.transaction()
490
490
491 # check in files
491 # check in files
492 new = {}
492 new = {}
493 linkrev = self.changelog.count()
493 linkrev = self.changelog.count()
494 commit.sort()
494 commit.sort()
495 for f in commit:
495 for f in commit:
496 self.ui.note(f + "\n")
496 self.ui.note(f + "\n")
497 try:
497 try:
498 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
498 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
499 t = self.wread(f)
499 t = self.wread(f)
500 except IOError:
500 except IOError:
501 self.ui.warn(_("trouble committing %s!\n") % f)
501 self.ui.warn(_("trouble committing %s!\n") % f)
502 raise
502 raise
503
503
504 r = self.file(f)
504 r = self.file(f)
505
505
506 meta = {}
506 meta = {}
507 cp = self.dirstate.copied(f)
507 cp = self.dirstate.copied(f)
508 if cp:
508 if cp:
509 meta["copy"] = cp
509 meta["copy"] = cp
510 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
510 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
511 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
511 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
512 fp1, fp2 = nullid, nullid
512 fp1, fp2 = nullid, nullid
513 else:
513 else:
514 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
514 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
515 if entry:
515 if entry:
516 new[f] = entry
516 new[f] = entry
517 continue
517 continue
518
518
519 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
519 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
520 # remember what we've added so that we can later calculate
520 # remember what we've added so that we can later calculate
521 # the files to pull from a set of changesets
521 # the files to pull from a set of changesets
522 changed.append(f)
522 changed.append(f)
523
523
524 # update manifest
524 # update manifest
525 m1 = m1.copy()
525 m1 = m1.copy()
526 m1.update(new)
526 m1.update(new)
527 for f in remove:
527 for f in remove:
528 if f in m1:
528 if f in m1:
529 del m1[f]
529 del m1[f]
530 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
530 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
531 (new, remove))
531 (new, remove))
532
532
533 # add changeset
533 # add changeset
534 new = new.keys()
534 new = new.keys()
535 new.sort()
535 new.sort()
536
536
537 user = user or self.ui.username()
537 user = user or self.ui.username()
538 if not text:
538 if not text:
539 edittext = [""]
539 edittext = [""]
540 if p2 != nullid:
540 if p2 != nullid:
541 edittext.append("HG: branch merge")
541 edittext.append("HG: branch merge")
542 edittext.extend(["HG: changed %s" % f for f in changed])
542 edittext.extend(["HG: changed %s" % f for f in changed])
543 edittext.extend(["HG: removed %s" % f for f in remove])
543 edittext.extend(["HG: removed %s" % f for f in remove])
544 if not changed and not remove:
544 if not changed and not remove:
545 edittext.append("HG: no files changed")
545 edittext.append("HG: no files changed")
546 edittext.append("")
546 edittext.append("")
547 # run editor in the repository root
547 # run editor in the repository root
548 olddir = os.getcwd()
548 olddir = os.getcwd()
549 os.chdir(self.root)
549 os.chdir(self.root)
550 edittext = self.ui.edit("\n".join(edittext), user)
550 edittext = self.ui.edit("\n".join(edittext), user)
551 os.chdir(olddir)
551 os.chdir(olddir)
552 if not edittext.rstrip():
552 if not edittext.rstrip():
553 return None
553 return None
554 text = edittext
554 text = edittext
555
555
556 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
556 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
557 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
557 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
558 parent2=xp2)
558 parent2=xp2)
559 tr.close()
559 tr.close()
560
560
561 self.dirstate.setparents(n)
561 self.dirstate.setparents(n)
562 self.dirstate.update(new, "n")
562 self.dirstate.update(new, "n")
563 self.dirstate.forget(remove)
563 self.dirstate.forget(remove)
564
564
565 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
565 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
566 return n
566 return n
567
567
568 def walk(self, node=None, files=[], match=util.always, badmatch=None):
568 def walk(self, node=None, files=[], match=util.always, badmatch=None):
569 if node:
569 if node:
570 fdict = dict.fromkeys(files)
570 fdict = dict.fromkeys(files)
571 for fn in self.manifest.read(self.changelog.read(node)[0]):
571 for fn in self.manifest.read(self.changelog.read(node)[0]):
572 fdict.pop(fn, None)
572 fdict.pop(fn, None)
573 if match(fn):
573 if match(fn):
574 yield 'm', fn
574 yield 'm', fn
575 for fn in fdict:
575 for fn in fdict:
576 if badmatch and badmatch(fn):
576 if badmatch and badmatch(fn):
577 if match(fn):
577 if match(fn):
578 yield 'b', fn
578 yield 'b', fn
579 else:
579 else:
580 self.ui.warn(_('%s: No such file in rev %s\n') % (
580 self.ui.warn(_('%s: No such file in rev %s\n') % (
581 util.pathto(self.getcwd(), fn), short(node)))
581 util.pathto(self.getcwd(), fn), short(node)))
582 else:
582 else:
583 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
583 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
584 yield src, fn
584 yield src, fn
585
585
586 def changes(self, node1=None, node2=None, files=[], match=util.always,
586 def changes(self, node1=None, node2=None, files=[], match=util.always,
587 wlock=None, show_ignored=None):
587 wlock=None, show_ignored=None):
588 """return changes between two nodes or node and working directory
588 """return changes between two nodes or node and working directory
589
589
590 If node1 is None, use the first dirstate parent instead.
590 If node1 is None, use the first dirstate parent instead.
591 If node2 is None, compare node1 with working directory.
591 If node2 is None, compare node1 with working directory.
592 """
592 """
593
593
594 def fcmp(fn, mf):
594 def fcmp(fn, mf):
595 t1 = self.wread(fn)
595 t1 = self.wread(fn)
596 t2 = self.file(fn).read(mf.get(fn, nullid))
596 t2 = self.file(fn).read(mf.get(fn, nullid))
597 return cmp(t1, t2)
597 return cmp(t1, t2)
598
598
599 def mfmatches(node):
599 def mfmatches(node):
600 change = self.changelog.read(node)
600 change = self.changelog.read(node)
601 mf = dict(self.manifest.read(change[0]))
601 mf = dict(self.manifest.read(change[0]))
602 for fn in mf.keys():
602 for fn in mf.keys():
603 if not match(fn):
603 if not match(fn):
604 del mf[fn]
604 del mf[fn]
605 return mf
605 return mf
606
606
607 if node1:
607 if node1:
608 # read the manifest from node1 before the manifest from node2,
608 # read the manifest from node1 before the manifest from node2,
609 # so that we'll hit the manifest cache if we're going through
609 # so that we'll hit the manifest cache if we're going through
610 # all the revisions in parent->child order.
610 # all the revisions in parent->child order.
611 mf1 = mfmatches(node1)
611 mf1 = mfmatches(node1)
612
612
613 # are we comparing the working directory?
613 # are we comparing the working directory?
614 if not node2:
614 if not node2:
615 if not wlock:
615 if not wlock:
616 try:
616 try:
617 wlock = self.wlock(wait=0)
617 wlock = self.wlock(wait=0)
618 except lock.LockException:
618 except lock.LockException:
619 wlock = None
619 wlock = None
620 lookup, modified, added, removed, deleted, unknown, ignored = (
620 lookup, modified, added, removed, deleted, unknown, ignored = (
621 self.dirstate.changes(files, match, show_ignored))
621 self.dirstate.changes(files, match, show_ignored))
622
622
623 # are we comparing working dir against its parent?
623 # are we comparing working dir against its parent?
624 if not node1:
624 if not node1:
625 if lookup:
625 if lookup:
626 # do a full compare of any files that might have changed
626 # do a full compare of any files that might have changed
627 mf2 = mfmatches(self.dirstate.parents()[0])
627 mf2 = mfmatches(self.dirstate.parents()[0])
628 for f in lookup:
628 for f in lookup:
629 if fcmp(f, mf2):
629 if fcmp(f, mf2):
630 modified.append(f)
630 modified.append(f)
631 elif wlock is not None:
631 elif wlock is not None:
632 self.dirstate.update([f], "n")
632 self.dirstate.update([f], "n")
633 else:
633 else:
634 # we are comparing working dir against non-parent
634 # we are comparing working dir against non-parent
635 # generate a pseudo-manifest for the working dir
635 # generate a pseudo-manifest for the working dir
636 mf2 = mfmatches(self.dirstate.parents()[0])
636 mf2 = mfmatches(self.dirstate.parents()[0])
637 for f in lookup + modified + added:
637 for f in lookup + modified + added:
638 mf2[f] = ""
638 mf2[f] = ""
639 for f in removed:
639 for f in removed:
640 if f in mf2:
640 if f in mf2:
641 del mf2[f]
641 del mf2[f]
642 else:
642 else:
643 # we are comparing two revisions
643 # we are comparing two revisions
644 deleted, unknown, ignored = [], [], []
644 deleted, unknown, ignored = [], [], []
645 mf2 = mfmatches(node2)
645 mf2 = mfmatches(node2)
646
646
647 if node1:
647 if node1:
648 # flush lists from dirstate before comparing manifests
648 # flush lists from dirstate before comparing manifests
649 modified, added = [], []
649 modified, added = [], []
650
650
651 for fn in mf2:
651 for fn in mf2:
652 if mf1.has_key(fn):
652 if mf1.has_key(fn):
653 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
653 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
654 modified.append(fn)
654 modified.append(fn)
655 del mf1[fn]
655 del mf1[fn]
656 else:
656 else:
657 added.append(fn)
657 added.append(fn)
658
658
659 removed = mf1.keys()
659 removed = mf1.keys()
660
660
661 # sort and return results:
661 # sort and return results:
662 for l in modified, added, removed, deleted, unknown, ignored:
662 for l in modified, added, removed, deleted, unknown, ignored:
663 l.sort()
663 l.sort()
664 if show_ignored is None:
664 if show_ignored is None:
665 return (modified, added, removed, deleted, unknown)
665 return (modified, added, removed, deleted, unknown)
666 else:
666 else:
667 return (modified, added, removed, deleted, unknown, ignored)
667 return (modified, added, removed, deleted, unknown, ignored)
668
668
669 def add(self, list, wlock=None):
669 def add(self, list, wlock=None):
670 if not wlock:
670 if not wlock:
671 wlock = self.wlock()
671 wlock = self.wlock()
672 for f in list:
672 for f in list:
673 p = self.wjoin(f)
673 p = self.wjoin(f)
674 if not os.path.exists(p):
674 if not os.path.exists(p):
675 self.ui.warn(_("%s does not exist!\n") % f)
675 self.ui.warn(_("%s does not exist!\n") % f)
676 elif not os.path.isfile(p):
676 elif not os.path.isfile(p):
677 self.ui.warn(_("%s not added: only files supported currently\n")
677 self.ui.warn(_("%s not added: only files supported currently\n")
678 % f)
678 % f)
679 elif self.dirstate.state(f) in 'an':
679 elif self.dirstate.state(f) in 'an':
680 self.ui.warn(_("%s already tracked!\n") % f)
680 self.ui.warn(_("%s already tracked!\n") % f)
681 else:
681 else:
682 self.dirstate.update([f], "a")
682 self.dirstate.update([f], "a")
683
683
684 def forget(self, list, wlock=None):
684 def forget(self, list, wlock=None):
685 if not wlock:
685 if not wlock:
686 wlock = self.wlock()
686 wlock = self.wlock()
687 for f in list:
687 for f in list:
688 if self.dirstate.state(f) not in 'ai':
688 if self.dirstate.state(f) not in 'ai':
689 self.ui.warn(_("%s not added!\n") % f)
689 self.ui.warn(_("%s not added!\n") % f)
690 else:
690 else:
691 self.dirstate.forget([f])
691 self.dirstate.forget([f])
692
692
693 def remove(self, list, unlink=False, wlock=None):
693 def remove(self, list, unlink=False, wlock=None):
694 if unlink:
694 if unlink:
695 for f in list:
695 for f in list:
696 try:
696 try:
697 util.unlink(self.wjoin(f))
697 util.unlink(self.wjoin(f))
698 except OSError, inst:
698 except OSError, inst:
699 if inst.errno != errno.ENOENT:
699 if inst.errno != errno.ENOENT:
700 raise
700 raise
701 if not wlock:
701 if not wlock:
702 wlock = self.wlock()
702 wlock = self.wlock()
703 for f in list:
703 for f in list:
704 p = self.wjoin(f)
704 p = self.wjoin(f)
705 if os.path.exists(p):
705 if os.path.exists(p):
706 self.ui.warn(_("%s still exists!\n") % f)
706 self.ui.warn(_("%s still exists!\n") % f)
707 elif self.dirstate.state(f) == 'a':
707 elif self.dirstate.state(f) == 'a':
708 self.dirstate.forget([f])
708 self.dirstate.forget([f])
709 elif f not in self.dirstate:
709 elif f not in self.dirstate:
710 self.ui.warn(_("%s not tracked!\n") % f)
710 self.ui.warn(_("%s not tracked!\n") % f)
711 else:
711 else:
712 self.dirstate.update([f], "r")
712 self.dirstate.update([f], "r")
713
713
714 def undelete(self, list, wlock=None):
714 def undelete(self, list, wlock=None):
715 p = self.dirstate.parents()[0]
715 p = self.dirstate.parents()[0]
716 mn = self.changelog.read(p)[0]
716 mn = self.changelog.read(p)[0]
717 mf = self.manifest.readflags(mn)
717 mf = self.manifest.readflags(mn)
718 m = self.manifest.read(mn)
718 m = self.manifest.read(mn)
719 if not wlock:
719 if not wlock:
720 wlock = self.wlock()
720 wlock = self.wlock()
721 for f in list:
721 for f in list:
722 if self.dirstate.state(f) not in "r":
722 if self.dirstate.state(f) not in "r":
723 self.ui.warn("%s not removed!\n" % f)
723 self.ui.warn("%s not removed!\n" % f)
724 else:
724 else:
725 t = self.file(f).read(m[f])
725 t = self.file(f).read(m[f])
726 self.wwrite(f, t)
726 self.wwrite(f, t)
727 util.set_exec(self.wjoin(f), mf[f])
727 util.set_exec(self.wjoin(f), mf[f])
728 self.dirstate.update([f], "n")
728 self.dirstate.update([f], "n")
729
729
730 def copy(self, source, dest, wlock=None):
730 def copy(self, source, dest, wlock=None):
731 p = self.wjoin(dest)
731 p = self.wjoin(dest)
732 if not os.path.exists(p):
732 if not os.path.exists(p):
733 self.ui.warn(_("%s does not exist!\n") % dest)
733 self.ui.warn(_("%s does not exist!\n") % dest)
734 elif not os.path.isfile(p):
734 elif not os.path.isfile(p):
735 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
735 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
736 else:
736 else:
737 if not wlock:
737 if not wlock:
738 wlock = self.wlock()
738 wlock = self.wlock()
739 if self.dirstate.state(dest) == '?':
739 if self.dirstate.state(dest) == '?':
740 self.dirstate.update([dest], "a")
740 self.dirstate.update([dest], "a")
741 self.dirstate.copy(source, dest)
741 self.dirstate.copy(source, dest)
742
742
743 def heads(self, start=None):
743 def heads(self, start=None):
744 heads = self.changelog.heads(start)
744 heads = self.changelog.heads(start)
745 # sort the output in rev descending order
745 # sort the output in rev descending order
746 heads = [(-self.changelog.rev(h), h) for h in heads]
746 heads = [(-self.changelog.rev(h), h) for h in heads]
747 heads.sort()
747 heads.sort()
748 return [n for (r, n) in heads]
748 return [n for (r, n) in heads]
749
749
750 # branchlookup returns a dict giving a list of branches for
750 # branchlookup returns a dict giving a list of branches for
751 # each head. A branch is defined as the tag of a node or
751 # each head. A branch is defined as the tag of a node or
752 # the branch of the node's parents. If a node has multiple
752 # the branch of the node's parents. If a node has multiple
753 # branch tags, tags are eliminated if they are visible from other
753 # branch tags, tags are eliminated if they are visible from other
754 # branch tags.
754 # branch tags.
755 #
755 #
756 # So, for this graph: a->b->c->d->e
756 # So, for this graph: a->b->c->d->e
757 # \ /
757 # \ /
758 # aa -----/
758 # aa -----/
759 # a has tag 2.6.12
759 # a has tag 2.6.12
760 # d has tag 2.6.13
760 # d has tag 2.6.13
761 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
761 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
762 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
762 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
763 # from the list.
763 # from the list.
764 #
764 #
765 # It is possible that more than one head will have the same branch tag.
765 # It is possible that more than one head will have the same branch tag.
766 # callers need to check the result for multiple heads under the same
766 # callers need to check the result for multiple heads under the same
767 # branch tag if that is a problem for them (ie checkout of a specific
767 # branch tag if that is a problem for them (ie checkout of a specific
768 # branch).
768 # branch).
769 #
769 #
770 # passing in a specific branch will limit the depth of the search
770 # passing in a specific branch will limit the depth of the search
771 # through the parents. It won't limit the branches returned in the
771 # through the parents. It won't limit the branches returned in the
772 # result though.
772 # result though.
773 def branchlookup(self, heads=None, branch=None):
773 def branchlookup(self, heads=None, branch=None):
774 if not heads:
774 if not heads:
775 heads = self.heads()
775 heads = self.heads()
776 headt = [ h for h in heads ]
776 headt = [ h for h in heads ]
777 chlog = self.changelog
777 chlog = self.changelog
778 branches = {}
778 branches = {}
779 merges = []
779 merges = []
780 seenmerge = {}
780 seenmerge = {}
781
781
782 # traverse the tree once for each head, recording in the branches
782 # traverse the tree once for each head, recording in the branches
783 # dict which tags are visible from this head. The branches
783 # dict which tags are visible from this head. The branches
784 # dict also records which tags are visible from each tag
784 # dict also records which tags are visible from each tag
785 # while we traverse.
785 # while we traverse.
786 while headt or merges:
786 while headt or merges:
787 if merges:
787 if merges:
788 n, found = merges.pop()
788 n, found = merges.pop()
789 visit = [n]
789 visit = [n]
790 else:
790 else:
791 h = headt.pop()
791 h = headt.pop()
792 visit = [h]
792 visit = [h]
793 found = [h]
793 found = [h]
794 seen = {}
794 seen = {}
795 while visit:
795 while visit:
796 n = visit.pop()
796 n = visit.pop()
797 if n in seen:
797 if n in seen:
798 continue
798 continue
799 pp = chlog.parents(n)
799 pp = chlog.parents(n)
800 tags = self.nodetags(n)
800 tags = self.nodetags(n)
801 if tags:
801 if tags:
802 for x in tags:
802 for x in tags:
803 if x == 'tip':
803 if x == 'tip':
804 continue
804 continue
805 for f in found:
805 for f in found:
806 branches.setdefault(f, {})[n] = 1
806 branches.setdefault(f, {})[n] = 1
807 branches.setdefault(n, {})[n] = 1
807 branches.setdefault(n, {})[n] = 1
808 break
808 break
809 if n not in found:
809 if n not in found:
810 found.append(n)
810 found.append(n)
811 if branch in tags:
811 if branch in tags:
812 continue
812 continue
813 seen[n] = 1
813 seen[n] = 1
814 if pp[1] != nullid and n not in seenmerge:
814 if pp[1] != nullid and n not in seenmerge:
815 merges.append((pp[1], [x for x in found]))
815 merges.append((pp[1], [x for x in found]))
816 seenmerge[n] = 1
816 seenmerge[n] = 1
817 if pp[0] != nullid:
817 if pp[0] != nullid:
818 visit.append(pp[0])
818 visit.append(pp[0])
819 # traverse the branches dict, eliminating branch tags from each
819 # traverse the branches dict, eliminating branch tags from each
820 # head that are visible from another branch tag for that head.
820 # head that are visible from another branch tag for that head.
821 out = {}
821 out = {}
822 viscache = {}
822 viscache = {}
823 for h in heads:
823 for h in heads:
824 def visible(node):
824 def visible(node):
825 if node in viscache:
825 if node in viscache:
826 return viscache[node]
826 return viscache[node]
827 ret = {}
827 ret = {}
828 visit = [node]
828 visit = [node]
829 while visit:
829 while visit:
830 x = visit.pop()
830 x = visit.pop()
831 if x in viscache:
831 if x in viscache:
832 ret.update(viscache[x])
832 ret.update(viscache[x])
833 elif x not in ret:
833 elif x not in ret:
834 ret[x] = 1
834 ret[x] = 1
835 if x in branches:
835 if x in branches:
836 visit[len(visit):] = branches[x].keys()
836 visit[len(visit):] = branches[x].keys()
837 viscache[node] = ret
837 viscache[node] = ret
838 return ret
838 return ret
839 if h not in branches:
839 if h not in branches:
840 continue
840 continue
841 # O(n^2), but somewhat limited. This only searches the
841 # O(n^2), but somewhat limited. This only searches the
842 # tags visible from a specific head, not all the tags in the
842 # tags visible from a specific head, not all the tags in the
843 # whole repo.
843 # whole repo.
844 for b in branches[h]:
844 for b in branches[h]:
845 vis = False
845 vis = False
846 for bb in branches[h].keys():
846 for bb in branches[h].keys():
847 if b != bb:
847 if b != bb:
848 if b in visible(bb):
848 if b in visible(bb):
849 vis = True
849 vis = True
850 break
850 break
851 if not vis:
851 if not vis:
852 l = out.setdefault(h, [])
852 l = out.setdefault(h, [])
853 l[len(l):] = self.nodetags(b)
853 l[len(l):] = self.nodetags(b)
854 return out
854 return out
855
855
856 def branches(self, nodes):
856 def branches(self, nodes):
857 if not nodes:
857 if not nodes:
858 nodes = [self.changelog.tip()]
858 nodes = [self.changelog.tip()]
859 b = []
859 b = []
860 for n in nodes:
860 for n in nodes:
861 t = n
861 t = n
862 while n:
862 while n:
863 p = self.changelog.parents(n)
863 p = self.changelog.parents(n)
864 if p[1] != nullid or p[0] == nullid:
864 if p[1] != nullid or p[0] == nullid:
865 b.append((t, n, p[0], p[1]))
865 b.append((t, n, p[0], p[1]))
866 break
866 break
867 n = p[0]
867 n = p[0]
868 return b
868 return b
869
869
870 def between(self, pairs):
870 def between(self, pairs):
871 r = []
871 r = []
872
872
873 for top, bottom in pairs:
873 for top, bottom in pairs:
874 n, l, i = top, [], 0
874 n, l, i = top, [], 0
875 f = 1
875 f = 1
876
876
877 while n != bottom:
877 while n != bottom:
878 p = self.changelog.parents(n)[0]
878 p = self.changelog.parents(n)[0]
879 if i == f:
879 if i == f:
880 l.append(n)
880 l.append(n)
881 f = f * 2
881 f = f * 2
882 n = p
882 n = p
883 i += 1
883 i += 1
884
884
885 r.append(l)
885 r.append(l)
886
886
887 return r
887 return r
888
888
889 def findincoming(self, remote, base=None, heads=None, force=False):
889 def findincoming(self, remote, base=None, heads=None, force=False):
890 m = self.changelog.nodemap
890 m = self.changelog.nodemap
891 search = []
891 search = []
892 fetch = {}
892 fetch = {}
893 seen = {}
893 seen = {}
894 seenbranch = {}
894 seenbranch = {}
895 if base == None:
895 if base == None:
896 base = {}
896 base = {}
897
897
898 if not heads:
898 if not heads:
899 heads = remote.heads()
899 heads = remote.heads()
900
900
901 if self.changelog.tip() == nullid:
901 if self.changelog.tip() == nullid:
902 if heads != [nullid]:
902 if heads != [nullid]:
903 return [nullid]
903 return [nullid]
904 return []
904 return []
905
905
906 # assume we're closer to the tip than the root
906 # assume we're closer to the tip than the root
907 # and start by examining the heads
907 # and start by examining the heads
908 self.ui.status(_("searching for changes\n"))
908 self.ui.status(_("searching for changes\n"))
909
909
910 unknown = []
910 unknown = []
911 for h in heads:
911 for h in heads:
912 if h not in m:
912 if h not in m:
913 unknown.append(h)
913 unknown.append(h)
914 else:
914 else:
915 base[h] = 1
915 base[h] = 1
916
916
917 if not unknown:
917 if not unknown:
918 return []
918 return []
919
919
920 rep = {}
920 rep = {}
921 reqcnt = 0
921 reqcnt = 0
922
922
923 # search through remote branches
923 # search through remote branches
924 # a 'branch' here is a linear segment of history, with four parts:
924 # a 'branch' here is a linear segment of history, with four parts:
925 # head, root, first parent, second parent
925 # head, root, first parent, second parent
926 # (a branch always has two parents (or none) by definition)
926 # (a branch always has two parents (or none) by definition)
927 unknown = remote.branches(unknown)
927 unknown = remote.branches(unknown)
928 while unknown:
928 while unknown:
929 r = []
929 r = []
930 while unknown:
930 while unknown:
931 n = unknown.pop(0)
931 n = unknown.pop(0)
932 if n[0] in seen:
932 if n[0] in seen:
933 continue
933 continue
934
934
935 self.ui.debug(_("examining %s:%s\n")
935 self.ui.debug(_("examining %s:%s\n")
936 % (short(n[0]), short(n[1])))
936 % (short(n[0]), short(n[1])))
937 if n[0] == nullid:
937 if n[0] == nullid:
938 break
938 break
939 if n in seenbranch:
939 if n in seenbranch:
940 self.ui.debug(_("branch already found\n"))
940 self.ui.debug(_("branch already found\n"))
941 continue
941 continue
942 if n[1] and n[1] in m: # do we know the base?
942 if n[1] and n[1] in m: # do we know the base?
943 self.ui.debug(_("found incomplete branch %s:%s\n")
943 self.ui.debug(_("found incomplete branch %s:%s\n")
944 % (short(n[0]), short(n[1])))
944 % (short(n[0]), short(n[1])))
945 search.append(n) # schedule branch range for scanning
945 search.append(n) # schedule branch range for scanning
946 seenbranch[n] = 1
946 seenbranch[n] = 1
947 else:
947 else:
948 if n[1] not in seen and n[1] not in fetch:
948 if n[1] not in seen and n[1] not in fetch:
949 if n[2] in m and n[3] in m:
949 if n[2] in m and n[3] in m:
950 self.ui.debug(_("found new changeset %s\n") %
950 self.ui.debug(_("found new changeset %s\n") %
951 short(n[1]))
951 short(n[1]))
952 fetch[n[1]] = 1 # earliest unknown
952 fetch[n[1]] = 1 # earliest unknown
953 base[n[2]] = 1 # latest known
953 base[n[2]] = 1 # latest known
954 continue
954 continue
955
955
956 for a in n[2:4]:
956 for a in n[2:4]:
957 if a not in rep:
957 if a not in rep:
958 r.append(a)
958 r.append(a)
959 rep[a] = 1
959 rep[a] = 1
960
960
961 seen[n[0]] = 1
961 seen[n[0]] = 1
962
962
963 if r:
963 if r:
964 reqcnt += 1
964 reqcnt += 1
965 self.ui.debug(_("request %d: %s\n") %
965 self.ui.debug(_("request %d: %s\n") %
966 (reqcnt, " ".join(map(short, r))))
966 (reqcnt, " ".join(map(short, r))))
967 for p in range(0, len(r), 10):
967 for p in range(0, len(r), 10):
968 for b in remote.branches(r[p:p+10]):
968 for b in remote.branches(r[p:p+10]):
969 self.ui.debug(_("received %s:%s\n") %
969 self.ui.debug(_("received %s:%s\n") %
970 (short(b[0]), short(b[1])))
970 (short(b[0]), short(b[1])))
971 if b[0] in m:
971 if b[0] in m:
972 self.ui.debug(_("found base node %s\n")
972 self.ui.debug(_("found base node %s\n")
973 % short(b[0]))
973 % short(b[0]))
974 base[b[0]] = 1
974 base[b[0]] = 1
975 elif b[0] not in seen:
975 elif b[0] not in seen:
976 unknown.append(b)
976 unknown.append(b)
977
977
978 # do binary search on the branches we found
978 # do binary search on the branches we found
979 while search:
979 while search:
980 n = search.pop(0)
980 n = search.pop(0)
981 reqcnt += 1
981 reqcnt += 1
982 l = remote.between([(n[0], n[1])])[0]
982 l = remote.between([(n[0], n[1])])[0]
983 l.append(n[1])
983 l.append(n[1])
984 p = n[0]
984 p = n[0]
985 f = 1
985 f = 1
986 for i in l:
986 for i in l:
987 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
987 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
988 if i in m:
988 if i in m:
989 if f <= 2:
989 if f <= 2:
990 self.ui.debug(_("found new branch changeset %s\n") %
990 self.ui.debug(_("found new branch changeset %s\n") %
991 short(p))
991 short(p))
992 fetch[p] = 1
992 fetch[p] = 1
993 base[i] = 1
993 base[i] = 1
994 else:
994 else:
995 self.ui.debug(_("narrowed branch search to %s:%s\n")
995 self.ui.debug(_("narrowed branch search to %s:%s\n")
996 % (short(p), short(i)))
996 % (short(p), short(i)))
997 search.append((p, i))
997 search.append((p, i))
998 break
998 break
999 p, f = i, f * 2
999 p, f = i, f * 2
1000
1000
1001 # sanity check our fetch list
1001 # sanity check our fetch list
1002 for f in fetch.keys():
1002 for f in fetch.keys():
1003 if f in m:
1003 if f in m:
1004 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1004 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1005
1005
1006 if base.keys() == [nullid]:
1006 if base.keys() == [nullid]:
1007 if force:
1007 if force:
1008 self.ui.warn(_("warning: repository is unrelated\n"))
1008 self.ui.warn(_("warning: repository is unrelated\n"))
1009 else:
1009 else:
1010 raise util.Abort(_("repository is unrelated"))
1010 raise util.Abort(_("repository is unrelated"))
1011
1011
1012 self.ui.note(_("found new changesets starting at ") +
1012 self.ui.note(_("found new changesets starting at ") +
1013 " ".join([short(f) for f in fetch]) + "\n")
1013 " ".join([short(f) for f in fetch]) + "\n")
1014
1014
1015 self.ui.debug(_("%d total queries\n") % reqcnt)
1015 self.ui.debug(_("%d total queries\n") % reqcnt)
1016
1016
1017 return fetch.keys()
1017 return fetch.keys()
1018
1018
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    # fix: test for None by identity (is), not equality (==) -- equality
    # would invoke __eq__ on an arbitrary 'base' argument
    if base is None:
        base = {}
    self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    # start with every node we have locally ...
    remain = dict.fromkeys(self.changelog.nodemap)

    # ... then prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push; with heads we also
    # report which known remote heads will acquire new children
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1066
1066
def pull(self, remote, heads=None, force=False):
    """Pull changesets from remote into this repository.

    Returns 0 when there is nothing to pull, otherwise the result of
    addchangegroup() for the received changegroup.
    """
    # hold the repository lock for the duration of the pull; it is
    # released when this local reference goes away
    plock = self.lock()

    fetch = self.findincoming(remote, force=force)
    # guard clause: nothing to do
    if not fetch:
        self.ui.status(_("no changes found\n"))
        return 0
    # a fetch list of just the null node means the repos are unrelated
    # as far as incoming is concerned: everything will be requested
    if fetch == [nullid]:
        self.ui.status(_("requesting all changes\n"))

    if heads is None:
        chgrp = remote.changegroup(fetch, 'pull')
    else:
        chgrp = remote.changegroupsubset(fetch, heads, 'pull')
    return self.addchangegroup(chgrp, 'pull')
1083
1083
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to remote.

    If revs is given, push only the changesets needed to make those
    revisions present remotely.  Returns 1 when the push is aborted or
    there is nothing to push, otherwise the value returned by
    remote.addchangegroup().
    """
    # lock the *remote* side while we push into it
    lock = remote.lock()

    base = {}
    remote_heads = remote.heads()
    # findincoming fills 'base' with nodes both sides share; a non-empty
    # 'inc' means remote has changes we don't -- refuse unless forced
    inc = self.findincoming(remote, base, remote_heads, force=force)
    if not force and inc:
        self.ui.warn(_("abort: unsynced remote changes!\n"))
        self.ui.status(_("(did you forget to sync?"
                         " use push -f to force)\n"))
        return 1

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        # restrict the outgoing set to ancestors of the requested revs
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return 1
    elif not force:
        # FIXME we don't properly detect creation of new heads
        # in the push -r case, assume the user knows what he's doing
        # (more local heads than remote heads implies the push would
        # create a new remote head; remote_heads == [nullid] means the
        # remote repository is empty, which is fine)
        if not revs and len(remote_heads) < len(heads) \
           and remote_heads != [nullid]:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return 1

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return remote.addchangegroup(cg, 'push')
1120
1120
def changegroupsubset(self, bases, heads, source):
    """This function generates a changegroup consisting of all the nodes
    that are descendents of any of the bases, and ancestors of any of
    the heads.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.

    Returns a util.chunkbuffer wrapping the generated chunk stream."""

    # give hooks a chance to veto the outgoing transfer
    self.hook('preoutgoing', throw=True, source=source)

    # Set up some initial variables
    # Make it easy to refer to self.changelog
    cl = self.changelog
    # msng is short for missing - compute the list of changesets in this
    # changegroup.
    msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
    # Some bases may turn out to be superfluous, and some heads may be
    # too.  nodesbetween will return the minimal set of bases and heads
    # necessary to re-create the changegroup.

    # Known heads are the list of heads that it is assumed the recipient
    # of this changegroup will know about.
    knownheads = {}
    # We assume that all parents of bases are known heads.
    for n in bases:
        for p in cl.parents(n):
            if p != nullid:
                knownheads[p] = 1
    knownheads = knownheads.keys()
    if knownheads:
        # Now that we know what heads are known, we can compute which
        # changesets are known.  The recipient must know about all
        # changesets required to reach the known heads from the null
        # changeset.
        has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
        junk = None
        # Transform the list into an ersatz set.
        has_cl_set = dict.fromkeys(has_cl_set)
    else:
        # If there were no known heads, the recipient cannot be assumed to
        # know about any changesets.
        has_cl_set = {}

    # Make it easy to refer to self.manifest
    mnfst = self.manifest
    # We don't know which manifests are missing yet
    msng_mnfst_set = {}
    # Nor do we know which filenodes are missing.
    msng_filenode_set = {}

    junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
    junk = None

    # A changeset always belongs to itself, so the changenode lookup
    # function for a changenode is identity.
    def identity(x):
        return x

    # A function generating function.  Sets up an environment for the
    # inner function.
    def cmp_by_rev_func(revlog):
        # Compare two nodes by their revision number in the environment's
        # revision history.  Since the revision number both represents the
        # most efficient order to read the nodes in, and represents a
        # topological sorting of the nodes, this function is often useful.
        def cmp_by_rev(a, b):
            return cmp(revlog.rev(a), revlog.rev(b))
        return cmp_by_rev

    # If we determine that a particular file or manifest node must be a
    # node that the recipient of the changegroup will already have, we can
    # also assume the recipient will have all the parents.  This function
    # prunes them from the set of missing nodes.
    def prune_parents(revlog, hasset, msngset):
        haslst = hasset.keys()
        haslst.sort(cmp_by_rev_func(revlog))
        for node in haslst:
            parentlst = [p for p in revlog.parents(node) if p != nullid]
            # breadth-less stack walk: mark every ancestor as "has"
            while parentlst:
                n = parentlst.pop()
                if n not in hasset:
                    hasset[n] = 1
                    p = [p for p in revlog.parents(n) if p != nullid]
                    parentlst.extend(p)
        for n in hasset:
            msngset.pop(n, None)

    # This is a function generating function used to set up an environment
    # for the inner function to execute in.
    def manifest_and_file_collector(changedfileset):
        # This is an information gathering function that gathers
        # information from each changeset node that goes out as part of
        # the changegroup.  The information gathered is a list of which
        # manifest nodes are potentially required (the recipient may
        # already have them) and total list of all files which were
        # changed in any changeset in the changegroup.
        #
        # We also remember the first changenode we saw any manifest
        # referenced by so we can later determine which changenode 'owns'
        # the manifest.
        def collect_manifests_and_files(clnode):
            c = cl.read(clnode)
            for f in c[3]:
                # This is to make sure we only have one instance of each
                # filename string for each filename.
                changedfileset.setdefault(f, f)
            msng_mnfst_set.setdefault(c[0], clnode)
        return collect_manifests_and_files

    # Figure out which manifest nodes (of the ones we think might be part
    # of the changegroup) the recipient must know about and remove them
    # from the changegroup.
    def prune_manifests():
        has_mnfst_set = {}
        for n in msng_mnfst_set:
            # If a 'missing' manifest thinks it belongs to a changenode
            # the recipient is assumed to have, obviously the recipient
            # must have that manifest.
            linknode = cl.node(mnfst.linkrev(n))
            if linknode in has_cl_set:
                has_mnfst_set[n] = 1
        prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

    # Use the information collected in collect_manifests_and_files to say
    # which changenode any manifestnode belongs to.
    def lookup_manifest_link(mnfstnode):
        return msng_mnfst_set[mnfstnode]

    # A function generating function that sets up the initial environment
    # the inner function.
    def filenode_collector(changedfiles):
        # next_rev is a one-element list so the closure can mutate it
        # (Python 2 has no 'nonlocal')
        next_rev = [0]
        # This gathers information from each manifestnode included in the
        # changegroup about which filenodes the manifest node references
        # so we can include those in the changegroup too.
        #
        # It also remembers which changenode each filenode belongs to.  It
        # does this by assuming the a filenode belongs to the changenode
        # the first manifest that references it belongs to.
        def collect_msng_filenodes(mnfstnode):
            r = mnfst.rev(mnfstnode)
            if r == next_rev[0]:
                # If the last rev we looked at was the one just previous,
                # we only need to see a diff.
                delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                # For each line in the delta
                for dline in delta.splitlines():
                    # get the filename and filenode for that line
                    f, fnode = dline.split('\0')
                    fnode = bin(fnode[:40])
                    f = changedfiles.get(f, None)
                    # And if the file is in the list of files we care
                    # about.
                    if f is not None:
                        # Get the changenode this manifest belongs to
                        clnode = msng_mnfst_set[mnfstnode]
                        # Create the set of filenodes for the file if
                        # there isn't one already.
                        ndset = msng_filenode_set.setdefault(f, {})
                        # And set the filenode's changelog node to the
                        # manifest's if it hasn't been set already.
                        ndset.setdefault(fnode, clnode)
            else:
                # Otherwise we need a full manifest.
                m = mnfst.read(mnfstnode)
                # For every file in we care about.
                for f in changedfiles:
                    fnode = m.get(f, None)
                    # If it's in the manifest
                    if fnode is not None:
                        # See comments above.
                        clnode = msng_mnfst_set[mnfstnode]
                        ndset = msng_filenode_set.setdefault(f, {})
                        ndset.setdefault(fnode, clnode)
            # Remember the revision we hope to see next.
            next_rev[0] = r + 1
        return collect_msng_filenodes

    # We have a list of filenodes we think we need for a file, lets remove
    # all those we now the recipient must have.
    def prune_filenodes(f, filerevlog):
        msngset = msng_filenode_set[f]
        hasset = {}
        # If a 'missing' filenode thinks it belongs to a changenode we
        # assume the recipient must have, then the recipient must have
        # that filenode.
        for n in msngset:
            clnode = cl.node(filerevlog.linkrev(n))
            if clnode in has_cl_set:
                hasset[n] = 1
        prune_parents(filerevlog, hasset, msngset)

    # A function generator function that sets up the a context for the
    # inner function.
    def lookup_filenode_link_func(fname):
        msngset = msng_filenode_set[fname]
        # Lookup the changenode the filenode belongs to.
        def lookup_filenode_link(fnode):
            return msngset[fnode]
        return lookup_filenode_link

    # Now that we have all theses utility functions to help out and
    # logically divide up the task, generate the group.
    def gengroup():
        # The set of changed files starts empty.
        changedfiles = {}
        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        group = cl.group(msng_cl_lst, identity,
                         manifest_and_file_collector(changedfiles))
        for chnk in group:
            yield chnk

        # The list of manifests has been collected by the generator
        # calling our functions back.
        prune_manifests()
        msng_mnfst_lst = msng_mnfst_set.keys()
        # Sort the manifestnodes by revision number.
        msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                            filenode_collector(changedfiles))
        for chnk in group:
            yield chnk

        # These are no longer needed, dereference and toss the memory for
        # them.
        msng_mnfst_lst = None
        msng_mnfst_set.clear()

        changedfiles = changedfiles.keys()
        changedfiles.sort()
        # Go through all our files in order sorted by name.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            # Toss out the filenodes that the recipient isn't really
            # missing.
            if msng_filenode_set.has_key(fname):
                prune_filenodes(fname, filerevlog)
                msng_filenode_lst = msng_filenode_set[fname].keys()
            else:
                msng_filenode_lst = []
            # If any filenodes are left, generate the group for them,
            # otherwise don't bother.
            if len(msng_filenode_lst) > 0:
                yield changegroup.genchunk(fname)
                # Sort the filenodes by their revision #
                msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                # Create a group generator and only pass in a changenode
                # lookup function as we need to collect no information
                # from filenodes.
                group = filerevlog.group(msng_filenode_lst,
                                         lookup_filenode_link_func(fname))
                for chnk in group:
                    yield chnk
            if msng_filenode_set.has_key(fname):
                # Don't need this anymore, toss it to free memory.
                del msng_filenode_set[fname]
        # Signal that no more groups are left.
        yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

    return util.chunkbuffer(gengroup())
1391
1391
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes - list of changenodes the recipient already has; everything
                    reachable beyond them is sent.
        source    - opaque tag passed through to the 'preoutgoing'/'outgoing'
                    hooks.

        Returns a util.chunkbuffer wrapping a generator that yields the raw
        changegroup chunks (changelog group, then manifest group, then one
        group per changed file, then a final close chunk).
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset from basenodes onward is outgoing
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of outgoing changelog revision numbers, used to filter which
        # manifest/file revisions belong to this changegroup
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # changelog nodes link to themselves
            return x

        def gennodelst(revlog):
            # yield the nodes of `revlog` whose linkrev falls inside the
            # outgoing revision set
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # side-effect callback for cl.group(): records, in
            # `changedfileset`, every file touched by each changeset sent
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a node of `revlog` back to the changelog node it belongs to
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                # only emit a group for files that actually have outgoing
                # revisions; each file group is preceded by its name chunk
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1457
1457
    def addchangegroup(self, source, srctype):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source  - a stream of changegroup chunks (read via the changegroup
                  module's chunk helpers).
        srctype - opaque tag passed to the pre/post hooks.

        Changelog and manifest data are written through appendfile wrappers
        so concurrent readers never observe a partially-applied group; the
        real files are only swapped in at the end.  The nested try/finally
        blocks guarantee the appendfile temp files are always cleaned up,
        even when an error aborts the pull (issue235 per the commit header).
        """

        def csmap(x):
            # changeset link function: each incoming changeset links to the
            # revision number it will receive
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # manifest/file link function: map changelog node -> revision
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog and manifest data to temp files so
        # concurrent readers will not see inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            co = cl.tip()
            chunkiter = changegroup.chunkiter(source)
            cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
            cnr, cor = map(cl.rev, (cn, co))
            if cn == nullid:
                # empty group: pretend tip did not move
                cnr = cor
            changesets = cnr - cor

            mf = None
            try:
                mf = appendfile.appendmanifest(self.opener,
                                               self.manifest.version)

                # pull off the manifest group
                self.ui.status(_("adding manifests\n"))
                mm = mf.tip()
                chunkiter = changegroup.chunkiter(source)
                mo = mf.addgroup(chunkiter, revmap, tr)

                # process the files
                self.ui.status(_("adding file changes\n"))
                while 1:
                    f = changegroup.getchunk(source)
                    if not f:
                        # empty chunk marks the end of the file groups
                        break
                    self.ui.debug(_("adding %s revisions\n") % f)
                    fl = self.file(f)
                    o = fl.count()
                    chunkiter = changegroup.chunkiter(source)
                    n = fl.addgroup(chunkiter, revmap, tr)
                    revisions += fl.count() - o
                    files += 1

                # write order here is important so concurrent readers will see
                # consistent view of repo
                mf.writedata()
            finally:
                if mf:
                    mf.cleanup()
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog and manifest see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.manifest = manifest.manifest(self.opener, self.manifest.version)
        self.changelog.checkinlinesize(tr)
        self.manifest.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                        % (changesets, revisions, files, heads))

        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)), source=srctype)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype)

            # fire 'incoming' once per newly-added changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype)

        return newheads - oldheads + 1
1550
1561
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
        """Update the working directory to changeset `node`.

        allow      - permit a branch merge (two parents afterwards)
        force      - clobber local changes / allow going backwards
        choose     - optional predicate limiting which files are touched
        moddirstate- when False, do not record anything in the dirstate
        forcemerge - merge even with outstanding uncommitted changes
        wlock      - pre-acquired working-dir lock, or None to take one
        show_stats - print the updated/merged/removed/unresolved summary

        Returns 1 on early abort, otherwise True/False indicating whether
        any per-file merge failed.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        err = False

        # p1 = current working dir parent, p2 = target revision
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))
        if not forcemerge and not force:
            # refuse to overwrite unknown working-dir files that differ
            # from the incoming revision
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}   # files needing a 3-way merge: f -> (my, other, mode)
        get = {}     # files to fetch as-is from the target: f -> node
        remove = []  # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap or f in added:
                    # this unknown file is the same as the checkout
                    # we need to reset the dirstate if the file was added
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # same xor trick as above to merge the exec bit
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                # handled; anything left in m2 afterwards is remote-only
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    # locally changed, remotely deleted: ask (default delete)
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # whatever remains in m2 exists only on the remote side
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                # remotely changed, locally deleted: ask (default keep)
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            # forced update: take the remote side of every would-be merge
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    # st_mtime=-1 forces a content comparison later
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        xp1 = hex(p1)
        xp2 = hex(p2)
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # a file already gone is fine; warn on anything else
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        if show_stats:
            stats = ((len(get), _("updated")),
                     (len(merge) - len(failedmerge), _("merged")),
                     (len(remove), _("removed")),
                     (len(failedmerge), _("unresolved")))
            note = ", ".join([_("%d files %s") % s for s in stats])
            self.ui.status("%s\n" % note)
        if moddirstate:
            if branch_merge:
                if failedmerge:
                    self.ui.status(_("There are unresolved merges,"
                                     " you can redo the full merge using:\n"
                                     " hg update -C %s\n"
                                     " hg merge %s\n"
                                     % (self.changelog.rev(p1),
                                        self.changelog.rev(p2))))
                else:
                    self.ui.status(_("(branch merge, don't forget to commit)\n"))
            elif failedmerge:
                self.ui.status(_("There are unresolved merges with"
                                 " locally modified files.\n"))

        return err
1843
1854
1844 def merge3(self, fn, my, other, p1, p2):
1855 def merge3(self, fn, my, other, p1, p2):
1845 """perform a 3-way merge in the working directory"""
1856 """perform a 3-way merge in the working directory"""
1846
1857
1847 def temp(prefix, node):
1858 def temp(prefix, node):
1848 pre = "%s~%s." % (os.path.basename(fn), prefix)
1859 pre = "%s~%s." % (os.path.basename(fn), prefix)
1849 (fd, name) = tempfile.mkstemp(prefix=pre)
1860 (fd, name) = tempfile.mkstemp(prefix=pre)
1850 f = os.fdopen(fd, "wb")
1861 f = os.fdopen(fd, "wb")
1851 self.wwrite(fn, fl.read(node), f)
1862 self.wwrite(fn, fl.read(node), f)
1852 f.close()
1863 f.close()
1853 return name
1864 return name
1854
1865
1855 fl = self.file(fn)
1866 fl = self.file(fn)
1856 base = fl.ancestor(my, other)
1867 base = fl.ancestor(my, other)
1857 a = self.wjoin(fn)
1868 a = self.wjoin(fn)
1858 b = temp("base", base)
1869 b = temp("base", base)
1859 c = temp("other", other)
1870 c = temp("other", other)
1860
1871
1861 self.ui.note(_("resolving %s\n") % fn)
1872 self.ui.note(_("resolving %s\n") % fn)
1862 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1873 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1863 (fn, short(my), short(other), short(base)))
1874 (fn, short(my), short(other), short(base)))
1864
1875
1865 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1876 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1866 or "hgmerge")
1877 or "hgmerge")
1867 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1878 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1868 environ={'HG_FILE': fn,
1879 environ={'HG_FILE': fn,
1869 'HG_MY_NODE': p1,
1880 'HG_MY_NODE': p1,
1870 'HG_OTHER_NODE': p2,
1881 'HG_OTHER_NODE': p2,
1871 'HG_FILE_MY_NODE': hex(my),
1882 'HG_FILE_MY_NODE': hex(my),
1872 'HG_FILE_OTHER_NODE': hex(other),
1883 'HG_FILE_OTHER_NODE': hex(other),
1873 'HG_FILE_BASE_NODE': hex(base)})
1884 'HG_FILE_BASE_NODE': hex(base)})
1874 if r:
1885 if r:
1875 self.ui.warn(_("merging %s failed!\n") % fn)
1886 self.ui.warn(_("merging %s failed!\n") % fn)
1876
1887
1877 os.unlink(b)
1888 os.unlink(b)
1878 os.unlink(c)
1889 os.unlink(c)
1879 return r
1890 return r
1880
1891
1881 def verify(self):
1892 def verify(self):
1882 filelinkrevs = {}
1893 filelinkrevs = {}
1883 filenodes = {}
1894 filenodes = {}
1884 changesets = revisions = files = 0
1895 changesets = revisions = files = 0
1885 errors = [0]
1896 errors = [0]
1886 warnings = [0]
1897 warnings = [0]
1887 neededmanifests = {}
1898 neededmanifests = {}
1888
1899
1889 def err(msg):
1900 def err(msg):
1890 self.ui.warn(msg + "\n")
1901 self.ui.warn(msg + "\n")
1891 errors[0] += 1
1902 errors[0] += 1
1892
1903
1893 def warn(msg):
1904 def warn(msg):
1894 self.ui.warn(msg + "\n")
1905 self.ui.warn(msg + "\n")
1895 warnings[0] += 1
1906 warnings[0] += 1
1896
1907
1897 def checksize(obj, name):
1908 def checksize(obj, name):
1898 d = obj.checksize()
1909 d = obj.checksize()
1899 if d[0]:
1910 if d[0]:
1900 err(_("%s data length off by %d bytes") % (name, d[0]))
1911 err(_("%s data length off by %d bytes") % (name, d[0]))
1901 if d[1]:
1912 if d[1]:
1902 err(_("%s index contains %d extra bytes") % (name, d[1]))
1913 err(_("%s index contains %d extra bytes") % (name, d[1]))
1903
1914
1904 def checkversion(obj, name):
1915 def checkversion(obj, name):
1905 if obj.version != revlog.REVLOGV0:
1916 if obj.version != revlog.REVLOGV0:
1906 if not revlogv1:
1917 if not revlogv1:
1907 warn(_("warning: `%s' uses revlog format 1") % name)
1918 warn(_("warning: `%s' uses revlog format 1") % name)
1908 elif revlogv1:
1919 elif revlogv1:
1909 warn(_("warning: `%s' uses revlog format 0") % name)
1920 warn(_("warning: `%s' uses revlog format 0") % name)
1910
1921
1911 revlogv1 = self.revlogversion != revlog.REVLOGV0
1922 revlogv1 = self.revlogversion != revlog.REVLOGV0
1912 if self.ui.verbose or revlogv1 != self.revlogv1:
1923 if self.ui.verbose or revlogv1 != self.revlogv1:
1913 self.ui.status(_("repository uses revlog format %d\n") %
1924 self.ui.status(_("repository uses revlog format %d\n") %
1914 (revlogv1 and 1 or 0))
1925 (revlogv1 and 1 or 0))
1915
1926
1916 seen = {}
1927 seen = {}
1917 self.ui.status(_("checking changesets\n"))
1928 self.ui.status(_("checking changesets\n"))
1918 checksize(self.changelog, "changelog")
1929 checksize(self.changelog, "changelog")
1919
1930
1920 for i in range(self.changelog.count()):
1931 for i in range(self.changelog.count()):
1921 changesets += 1
1932 changesets += 1
1922 n = self.changelog.node(i)
1933 n = self.changelog.node(i)
1923 l = self.changelog.linkrev(n)
1934 l = self.changelog.linkrev(n)
1924 if l != i:
1935 if l != i:
1925 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1936 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1926 if n in seen:
1937 if n in seen:
1927 err(_("duplicate changeset at revision %d") % i)
1938 err(_("duplicate changeset at revision %d") % i)
1928 seen[n] = 1
1939 seen[n] = 1
1929
1940
1930 for p in self.changelog.parents(n):
1941 for p in self.changelog.parents(n):
1931 if p not in self.changelog.nodemap:
1942 if p not in self.changelog.nodemap:
1932 err(_("changeset %s has unknown parent %s") %
1943 err(_("changeset %s has unknown parent %s") %
1933 (short(n), short(p)))
1944 (short(n), short(p)))
1934 try:
1945 try:
1935 changes = self.changelog.read(n)
1946 changes = self.changelog.read(n)
1936 except KeyboardInterrupt:
1947 except KeyboardInterrupt:
1937 self.ui.warn(_("interrupted"))
1948 self.ui.warn(_("interrupted"))
1938 raise
1949 raise
1939 except Exception, inst:
1950 except Exception, inst:
1940 err(_("unpacking changeset %s: %s") % (short(n), inst))
1951 err(_("unpacking changeset %s: %s") % (short(n), inst))
1941 continue
1952 continue
1942
1953
1943 neededmanifests[changes[0]] = n
1954 neededmanifests[changes[0]] = n
1944
1955
1945 for f in changes[3]:
1956 for f in changes[3]:
1946 filelinkrevs.setdefault(f, []).append(i)
1957 filelinkrevs.setdefault(f, []).append(i)
1947
1958
1948 seen = {}
1959 seen = {}
1949 self.ui.status(_("checking manifests\n"))
1960 self.ui.status(_("checking manifests\n"))
1950 checkversion(self.manifest, "manifest")
1961 checkversion(self.manifest, "manifest")
1951 checksize(self.manifest, "manifest")
1962 checksize(self.manifest, "manifest")
1952
1963
1953 for i in range(self.manifest.count()):
1964 for i in range(self.manifest.count()):
1954 n = self.manifest.node(i)
1965 n = self.manifest.node(i)
1955 l = self.manifest.linkrev(n)
1966 l = self.manifest.linkrev(n)
1956
1967
1957 if l < 0 or l >= self.changelog.count():
1968 if l < 0 or l >= self.changelog.count():
1958 err(_("bad manifest link (%d) at revision %d") % (l, i))
1969 err(_("bad manifest link (%d) at revision %d") % (l, i))
1959
1970
1960 if n in neededmanifests:
1971 if n in neededmanifests:
1961 del neededmanifests[n]
1972 del neededmanifests[n]
1962
1973
1963 if n in seen:
1974 if n in seen:
1964 err(_("duplicate manifest at revision %d") % i)
1975 err(_("duplicate manifest at revision %d") % i)
1965
1976
1966 seen[n] = 1
1977 seen[n] = 1
1967
1978
1968 for p in self.manifest.parents(n):
1979 for p in self.manifest.parents(n):
1969 if p not in self.manifest.nodemap:
1980 if p not in self.manifest.nodemap:
1970 err(_("manifest %s has unknown parent %s") %
1981 err(_("manifest %s has unknown parent %s") %
1971 (short(n), short(p)))
1982 (short(n), short(p)))
1972
1983
1973 try:
1984 try:
1974 delta = mdiff.patchtext(self.manifest.delta(n))
1985 delta = mdiff.patchtext(self.manifest.delta(n))
1975 except KeyboardInterrupt:
1986 except KeyboardInterrupt:
1976 self.ui.warn(_("interrupted"))
1987 self.ui.warn(_("interrupted"))
1977 raise
1988 raise
1978 except Exception, inst:
1989 except Exception, inst:
1979 err(_("unpacking manifest %s: %s") % (short(n), inst))
1990 err(_("unpacking manifest %s: %s") % (short(n), inst))
1980 continue
1991 continue
1981
1992
1982 try:
1993 try:
1983 ff = [ l.split('\0') for l in delta.splitlines() ]
1994 ff = [ l.split('\0') for l in delta.splitlines() ]
1984 for f, fn in ff:
1995 for f, fn in ff:
1985 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1996 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1986 except (ValueError, TypeError), inst:
1997 except (ValueError, TypeError), inst:
1987 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1998 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1988
1999
1989 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2000 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1990
2001
1991 for m, c in neededmanifests.items():
2002 for m, c in neededmanifests.items():
1992 err(_("Changeset %s refers to unknown manifest %s") %
2003 err(_("Changeset %s refers to unknown manifest %s") %
1993 (short(m), short(c)))
2004 (short(m), short(c)))
1994 del neededmanifests
2005 del neededmanifests
1995
2006
1996 for f in filenodes:
2007 for f in filenodes:
1997 if f not in filelinkrevs:
2008 if f not in filelinkrevs:
1998 err(_("file %s in manifest but not in changesets") % f)
2009 err(_("file %s in manifest but not in changesets") % f)
1999
2010
2000 for f in filelinkrevs:
2011 for f in filelinkrevs:
2001 if f not in filenodes:
2012 if f not in filenodes:
2002 err(_("file %s in changeset but not in manifest") % f)
2013 err(_("file %s in changeset but not in manifest") % f)
2003
2014
2004 self.ui.status(_("checking files\n"))
2015 self.ui.status(_("checking files\n"))
2005 ff = filenodes.keys()
2016 ff = filenodes.keys()
2006 ff.sort()
2017 ff.sort()
2007 for f in ff:
2018 for f in ff:
2008 if f == "/dev/null":
2019 if f == "/dev/null":
2009 continue
2020 continue
2010 files += 1
2021 files += 1
2011 if not f:
2022 if not f:
2012 err(_("file without name in manifest %s") % short(n))
2023 err(_("file without name in manifest %s") % short(n))
2013 continue
2024 continue
2014 fl = self.file(f)
2025 fl = self.file(f)
2015 checkversion(fl, f)
2026 checkversion(fl, f)
2016 checksize(fl, f)
2027 checksize(fl, f)
2017
2028
2018 nodes = {nullid: 1}
2029 nodes = {nullid: 1}
2019 seen = {}
2030 seen = {}
2020 for i in range(fl.count()):
2031 for i in range(fl.count()):
2021 revisions += 1
2032 revisions += 1
2022 n = fl.node(i)
2033 n = fl.node(i)
2023
2034
2024 if n in seen:
2035 if n in seen:
2025 err(_("%s: duplicate revision %d") % (f, i))
2036 err(_("%s: duplicate revision %d") % (f, i))
2026 if n not in filenodes[f]:
2037 if n not in filenodes[f]:
2027 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2038 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2028 else:
2039 else:
2029 del filenodes[f][n]
2040 del filenodes[f][n]
2030
2041
2031 flr = fl.linkrev(n)
2042 flr = fl.linkrev(n)
2032 if flr not in filelinkrevs.get(f, []):
2043 if flr not in filelinkrevs.get(f, []):
2033 err(_("%s:%s points to unexpected changeset %d")
2044 err(_("%s:%s points to unexpected changeset %d")
2034 % (f, short(n), flr))
2045 % (f, short(n), flr))
2035 else:
2046 else:
2036 filelinkrevs[f].remove(flr)
2047 filelinkrevs[f].remove(flr)
2037
2048
2038 # verify contents
2049 # verify contents
2039 try:
2050 try:
2040 t = fl.read(n)
2051 t = fl.read(n)
2041 except KeyboardInterrupt:
2052 except KeyboardInterrupt:
2042 self.ui.warn(_("interrupted"))
2053 self.ui.warn(_("interrupted"))
2043 raise
2054 raise
2044 except Exception, inst:
2055 except Exception, inst:
2045 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2056 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2046
2057
2047 # verify parents
2058 # verify parents
2048 (p1, p2) = fl.parents(n)
2059 (p1, p2) = fl.parents(n)
2049 if p1 not in nodes:
2060 if p1 not in nodes:
2050 err(_("file %s:%s unknown parent 1 %s") %
2061 err(_("file %s:%s unknown parent 1 %s") %
2051 (f, short(n), short(p1)))
2062 (f, short(n), short(p1)))
2052 if p2 not in nodes:
2063 if p2 not in nodes:
2053 err(_("file %s:%s unknown parent 2 %s") %
2064 err(_("file %s:%s unknown parent 2 %s") %
2054 (f, short(n), short(p1)))
2065 (f, short(n), short(p1)))
2055 nodes[n] = 1
2066 nodes[n] = 1
2056
2067
2057 # cross-check
2068 # cross-check
2058 for node in filenodes[f]:
2069 for node in filenodes[f]:
2059 err(_("node %s in manifests not in %s") % (hex(node), f))
2070 err(_("node %s in manifests not in %s") % (hex(node), f))
2060
2071
2061 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2072 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2062 (files, changesets, revisions))
2073 (files, changesets, revisions))
2063
2074
2064 if warnings[0]:
2075 if warnings[0]:
2065 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2076 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2066 if errors[0]:
2077 if errors[0]:
2067 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2078 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2068 return 1
2079 return 1
2069
2080
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that promotes transaction journal files to undo files.

    Only the plain path string *base* is captured in the closure (never the
    repository object itself), so no reference cycle is created and the
    repository's destructors can still run.
    """
    p = base
    def a():
        # Rename each journal file to its undo counterpart, in order.
        for old, new in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(p, old), os.path.join(p, new))
    return a
2078
2089
General Comments 0
You need to be logged in to leave comments. Login now