diff --git a/.black b/.black new file mode 100644 index 000000000..67aa0fb24 --- /dev/null +++ b/.black @@ -0,0 +1,5 @@ + +[tool.black] +line-length=80 +target-version=["py311"] + diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 000000000..1af763c54 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Migrate code style to Black +a158d613bf979c152338f6c407ec6de75d9a9c1a diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml new file mode 100644 index 000000000..4e6f058f7 --- /dev/null +++ b/.github/workflows/black.yml @@ -0,0 +1,12 @@ +name: lint +on: [push, pull_request] +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: psf/black@stable + with: + options: "--check --config=.black" + src: "./src" + diff --git a/src/brand/bhyve/boot.py b/src/brand/bhyve/boot.py index c25c8cf60..b37a48177 100755 --- a/src/brand/bhyve/boot.py +++ b/src/brand/bhyve/boot.py @@ -17,8 +17,17 @@ import bundle import bootlib -from bootlib import fatal, error, debug, info, warning, boolv, diskpath, \ - expandopts, collapseopts +from bootlib import ( + fatal, + error, + debug, + info, + warning, + boolv, + diskpath, + expandopts, + collapseopts, +) import getopt import logging import os @@ -34,6 +43,8 @@ import uefi.vars as uefivars +# fmt: off + STATEDIR = '/var/run/bhyve' RSRVRCTL = '/usr/lib/rsrvrctl' FIRMWAREPATH = '/usr/share/bhyve/firmware' @@ -116,6 +127,8 @@ VNC_SLOT = 30 LPC_SLOT_WIN = 31 +# fmt: off + ############################################################################## sysboot = False @@ -665,6 +678,8 @@ def apply_bootnext(v): from itertools import zip_longest import json + # fmt: off + data = { 'zonename': z.name, 'zonepath': z.zonepath, @@ -688,6 +703,8 @@ def apply_bootnext(v): 'config': {}, } + # fmt: on + for line in p.stdout.splitlines(): if line.startswith('config.dump'): continue if '=' not in line: continue diff --git a/src/brand/bhyve/bootlib.py b/src/brand/bhyve/bootlib.py index f289c7417..b1a750b9b 100644 --- a/src/brand/bhyve/bootlib.py +++ b/src/brand/bhyve/bootlib.py @@ -28,39 +28,49 @@ log_quiet = False + def log_stdout(level): logging.basicConfig(stream=sys.stdout, level=level) + def log_file(file, level): os.makedirs(os.path.dirname(file), mode=0o711, exist_ok=True) - logging.basicConfig(filename=file, filemode='a', level=level, force=True) + logging.basicConfig(filename=file, filemode="a", level=level, force=True) + def set_quiet(val): global log_quiet log_quiet = val + def fatal(msg): logging.error(msg) print(msg, file=sys.stderr) sys.exit(1) + def error(msg): logging.error(msg) + def debug(msg): if not log_quiet: logging.debug(msg) + def info(msg): if not log_quiet: logging.info(msg) + def warning(msg): if not log_quiet: logging.warning(msg) + ############################################################################## + class Zone: xmlfile = None xmlcfg = None @@ -70,11 +80,11 @@ def __init__(self, xmlfile): self.xmlfile = xmlfile if not os.path.isfile(xmlfile): - fatal(f'Cannot find zone XML file at {xmlfile}') + fatal(f"Cannot find zone XML file at {xmlfile}") try: self.xmlcfg = etree.parse(xmlfile) except: - fatal(f'Could not parse {xmlfile}') + fatal(f"Could not parse {xmlfile}") self.xmlroot = self.xmlcfg.getroot() @@ -83,7 +93,7 @@ def __getattr__(self, attr): @property def zoneroot(self): - return f'{self.zonepath}/root' + return f"{self.zonepath}/root" def find(self, path): return self.xmlroot.find(path) @@ -101,22 +111,22 @@ def findattr(self, attr, all=False): 
return self.find(f'./attr[@name="{attr}"]') def iterattr(self, regex): - for dev in self.findall('./attr[@name]'): - if m := re.search(regex, dev.get('name').strip()): + for dev in self.findall("./attr[@name]"): + if m := re.search(regex, dev.get("name").strip()): yield dev, m def parseopt(self, tag, opts, aliases): try: el = self.findattr(tag) - opts[tag] = el.get('value').strip() + opts[tag] = el.get("value").strip() debug(f'Found custom {tag} attribute - "{opts[tag]}"') if tag in aliases: val = opts[tag] if (bt := boolv(val, tag, ignore=True)) is not None: - val = 'on' if bt else 'off' + val = "on" if bt else "off" try: opts[tag] = aliases[tag][val] - debug(f' Alias expanded to {opts[tag]}') + debug(f" Alias expanded to {opts[tag]}") except KeyError: pass except: @@ -124,9 +134,9 @@ def parseopt(self, tag, opts, aliases): def uuid(self): try: - with open(f'{z.zoneroot}/etc/uuid') as file: + with open(f"{z.zoneroot}/etc/uuid") as file: uuid = file.read().strip() - info('Zone UUID: {0}'.format(id)) + info("Zone UUID: {0}".format(id)) except: uuid = str(uuidlib.uuid4()) return uuid @@ -135,201 +145,215 @@ def uuid(self): # generate a list. def build_devlist(self, type, maxval, plain=True): devlist = {} - for dev, m in self.iterattr(rf'^{type}(\d+)$'): + for dev, m in self.iterattr(rf"^{type}(\d+)$"): k = int(m.group(1)) if k in devlist: - fatal(f'{type}{k} appears more than once in configuration') - if (k >= maxval): - fatal(f'{type}{k} slot out of range') - devlist[k] = dev.get('value').strip() + fatal(f"{type}{k} appears more than once in configuration") + if k >= maxval: + fatal(f"{type}{k} slot out of range") + devlist[k] = dev.get("value").strip() if plain: # Now insert plain tags into the list, using available slots # in order - avail = sorted(set(range(0, maxval)). 
- difference(sorted(devlist.keys()))) + avail = sorted( + set(range(0, maxval)).difference(sorted(devlist.keys())) + ) for dev in self.findattr(type, True): try: k = avail.pop(0) except IndexError: - fatal(f'{type}: no more available slots') - devlist[k] = dev.get('value').strip() + fatal(f"{type}: no more available slots") + devlist[k] = dev.get("value").strip() - debug('{} list: \n{}'.format(type, pformat(devlist))) + debug("{} list: \n{}".format(type, pformat(devlist))) return sorted(devlist.items()) def build_cloudinit_image(self, uuid, src, testmode): - info('Generating cloud-init data') + info("Generating cloud-init data") #### Metadata meta_data = { - 'instance-id': uuid, - 'local-hostname': self.name, + "instance-id": uuid, + "local-hostname": self.name, } #### Userdata user_data = { - 'hostname': self.name, - 'disable_root': False, + "hostname": self.name, + "disable_root": False, } - if (v := self.findattr('password')) is not None: - user_data['password'] = file_or_string(v.get('value')) - user_data['chpasswd'] = { 'expire': False } - user_data['ssh-pwauth'] = True - - if (v := self.findattr('sshkey')) is not None: - v = file_or_string(v.get('value')) - user_data['ssh_authorized_keys'] = [v] - user_data['users'] = [ - 'default', - {'name': 'root', 'ssh_authorized_keys': [v]} + if (v := self.findattr("password")) is not None: + user_data["password"] = file_or_string(v.get("value")) + user_data["chpasswd"] = {"expire": False} + user_data["ssh-pwauth"] = True + + if (v := self.findattr("sshkey")) is not None: + v = file_or_string(v.get("value")) + user_data["ssh_authorized_keys"] = [v] + user_data["users"] = [ + "default", + {"name": "root", "ssh_authorized_keys": [v]}, ] #### Network network_data = {} - addresses = self.findall('./network[@allowed-address]') + addresses = self.findall("./network[@allowed-address]") if addresses is not None: nsdone = False - network_data['version'] = 2 - network_data['ethernets'] = {} + network_data["version"] = 2 + network_data["ethernets"] = {} for a in addresses: - vnic = a.get('physical') - addr = a.get('allowed-address') - rtr = a.get('defrouter') + vnic = a.get("physical") + addr = a.get("allowed-address") + rtr = a.get("defrouter") mac = get_mac(vnic) if mac is None: continue data = { - 'match': { 'macaddress': mac }, - 'set-name': vnic, - 'addresses': [addr], + "match": {"macaddress": mac}, + "set-name": vnic, + "addresses": [addr], } if rtr: - data['gateway4'] = rtr + data["gateway4"] = rtr if not nsdone: - domain = self.findattr('dns-domain') - resolvers = self.findattr('resolvers') + domain = self.findattr("dns-domain") + resolvers = self.findattr("resolvers") if resolvers is not None or domain is not None: nsdata = {} if domain is not None: - nsdata['search'] = [domain.get('value').strip()] + nsdata["search"] = [domain.get("value").strip()] if resolvers is not None: - nsdata['addresses'] = \ - resolvers.get('value').strip().split(',') - data['nameservers'] = nsdata + nsdata["addresses"] = ( + resolvers.get("value").strip().split(",") + ) + data["nameservers"] = nsdata nsdone = True - network_data['ethernets'][vnic] = data + network_data["ethernets"][vnic] = data import yaml - debug('Metadata:\n' + yaml.dump(meta_data)) - debug('Userdata:\n' + yaml.dump(user_data)) - debug('Netdata:\n' + yaml.dump(network_data)) + + debug("Metadata:\n" + yaml.dump(meta_data)) + debug("Userdata:\n" + yaml.dump(user_data)) + debug("Netdata:\n" + yaml.dump(network_data)) if testmode: return - dir = tempfile.mkdtemp(dir=f'{self.zoneroot}', 
prefix='cloud-init.') + dir = tempfile.mkdtemp(dir=f"{self.zoneroot}", prefix="cloud-init.") - with open(f'{dir}/meta-data', 'w') as fh: + with open(f"{dir}/meta-data", "w") as fh: yaml.dump(meta_data, fh) if os.path.isabs(src) and os.path.isfile(src): - info(f'Using supplied cloud-init user-data file from {src}') - shutil.copyfile(src, f'{dir}/user-data') + info(f"Using supplied cloud-init user-data file from {src}") + shutil.copyfile(src, f"{dir}/user-data") else: - with open(f'{dir}/user-data', 'w') as fh: - fh.write('#cloud-config\n') + with open(f"{dir}/user-data", "w") as fh: + fh.write("#cloud-config\n") yaml.dump(user_data, fh) if network_data: - with open(f'{dir}/network-config', 'w') as fh: + with open(f"{dir}/network-config", "w") as fh: yaml.dump(network_data, fh) #### Build image - cidir = f'{self.zoneroot}/cloud-init' - seed = f'{self.zoneroot}/cloud-init.iso' + cidir = f"{self.zoneroot}/cloud-init" + seed = f"{self.zoneroot}/cloud-init.iso" if os.path.exists(cidir): shutil.rmtree(cidir) os.rename(dir, cidir) - info('Building cloud-init ISO image') + info("Building cloud-init ISO image") try: - ret = subprocess.run([ - '/usr/bin/mkisofs', - '-full-iso9660-filenames', - '-untranslated-filenames', - '-rock', - '-volid', 'CIDATA', - '-o', seed, - cidir - ], text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + ret = subprocess.run( + [ + "/usr/bin/mkisofs", + "-full-iso9660-filenames", + "-untranslated-filenames", + "-rock", + "-volid", + "CIDATA", + "-o", + seed, + cidir, + ], + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) for l in ret.stdout.splitlines(): info(l) os.chmod(seed, mode=0o644) except Exception as e: - fatal(f'Could not create cloud-init ISO image: {e}') + fatal(f"Could not create cloud-init ISO image: {e}") + ############################################################################## + def boolv(s, var, ignore=False): - if s in ['true', 'yes', 'on', '1']: + if s in ["true", "yes", "on", "1"]: return True - if s in ['false', 'no', 'off', '0']: + if s in ["false", "no", "off", "0"]: return False if not ignore: - fatal(f'Invalid value {s} for boolean variable {var}') + fatal(f"Invalid value {s} for boolean variable {var}") return None + def file_or_string(f): if os.path.isabs(f) and os.path.isfile(f): try: with open(f) as fh: f = fh.read() except Exception as e: - fatal(f'Could not read from {f}: {e}') + fatal(f"Could not read from {f}: {e}") return f.strip() + def expandopts(opt): - """ Expand a comma-separated option string out into a dictionary. - For example: - on,password=fred,wait,w=1234 - becomes: - {'on': '', 'password': 'fred', 'wait': '', 'w': '1234'} + """Expand a comma-separated option string out into a dictionary. + For example: + on,password=fred,wait,w=1234 + becomes: + {'on': '', 'password': 'fred', 'wait': '', 'w': '1234'} """ return { - k: v - for (k, v, *_) - in [ - (o + '=').split('=') - for o in opt.split(',') - ] + k: v for (k, v, *_) in [(o + "=").split("=") for o in opt.split(",")] } + def collapseopts(opts): - """ The reverse of expandopts. Convert a dictionary back into an option - string. """ - return ','.join([f'{k}={v}'.rstrip('=') for k, v in opts.items()]) + """The reverse of expandopts. 
Convert a dictionary back into an option + string.""" + return ",".join([f"{k}={v}".rstrip("=") for k, v in opts.items()]) + def diskpath(arg): - if arg.startswith('/'): + if arg.startswith("/"): return arg - return '/dev/zvol/rdsk/{0}'.format(arg) + return "/dev/zvol/rdsk/{0}".format(arg) + + +ram_shift = {"e": 60, "p": 50, "t": 40, "g": 30, "m": 20, "k": 10, "b": 0} + -ram_shift = { 'e': 60, 'p': 50, 't': 40, 'g': 30, 'm': 20, 'k': 10, 'b': 0 } def parse_ram(v): # Parse a string representing an amount of RAM into bytes - m = re.search(rf'^(\d+)(.?)$', v) + m = re.search(rf"^(\d+)(.?)$", v) if not m: fatal(f'Could not parse ram value "{v}"') (mem, suffix) = m.groups() @@ -338,27 +362,31 @@ def parse_ram(v): if not suffix: # If the memory size specified as a plain number is less than a # mebibyte then interpret it as being in units of MiB. - suffix = 'm' if mem < MiB else 'b' + suffix = "m" if mem < MiB else "b" try: shift = ram_shift[suffix.lower()] except KeyError: - fatal(f'Unknown RAM suffix, {suffix}') + fatal(f"Unknown RAM suffix, {suffix}") mem <<= shift - debug(f'parse_ram({v}) = {mem}') + debug(f"parse_ram({v}) = {mem}") return mem + def get_mac(ifname): - p = subprocess.run(['/usr/sbin/dladm', 'show-vnic', - '-p', '-o', 'macaddress', ifname], stdout=subprocess.PIPE) + p = subprocess.run( + ["/usr/sbin/dladm", "show-vnic", "-p", "-o", "macaddress", ifname], + stdout=subprocess.PIPE, + ) if p.returncode != 0: - warning(f'Could not find MAC address for VNIC {ifname}') + warning(f"Could not find MAC address for VNIC {ifname}") return None - mac = p.stdout.decode('utf-8').strip() + mac = p.stdout.decode("utf-8").strip() # Need to zero-pad the bytes - return ':'.join(l.zfill(2) for l in mac.split(':')) + return ":".join(l.zfill(2) for l in mac.split(":")) + # Vim hints # vim:ts=4:sw=4:et:fdm=marker diff --git a/src/brand/bhyve/bundle.py b/src/brand/bhyve/bundle.py index 0beca54b3..484165c07 100644 --- a/src/brand/bhyve/bundle.py +++ b/src/brand/bhyve/bundle.py @@ -20,11 +20,14 @@ # If PYTHONPATH is set in the environment and the environment is not # being ignored, then don't adjust the path. 
-if 'PYTHONPATH' not in os.environ or getattr(sys.flags, 'ignore_environment'): +if "PYTHONPATH" not in os.environ or getattr(sys.flags, "ignore_environment"): sys.path, remainder = sys.path[:2], sys.path[2:] - addsitedir("{}/python{}".format( - os.path.dirname(__file__), - '.'.join(platform.python_version_tuple()[:2]))) + addsitedir( + "{}/python{}".format( + os.path.dirname(__file__), + ".".join(platform.python_version_tuple()[:2]), + ) + ) sys.path.extend(remainder) # Vim hints diff --git a/src/brand/bhyve/uefi/align.py b/src/brand/bhyve/uefi/align.py index 794101a33..69ca87647 100644 --- a/src/brand/bhyve/uefi/align.py +++ b/src/brand/bhyve/uefi/align.py @@ -1,4 +1,3 @@ - # {{{ CDDL HEADER # # This file and its contents are supplied under the terms of the @@ -20,8 +19,13 @@ # # See https://github.com/construct/construct/issues/980 -from construct import Aligned as _Aligned, \ - stream_tell, stream_read, stream_write +from construct import ( + Aligned as _Aligned, + stream_tell, + stream_read, + stream_write, +) + class Aligned(_Aligned): def _pad(self, stream, path): @@ -41,4 +45,3 @@ def _build(self, obj, stream, context, path): pad = self._pad(stream, path) stream_write(stream, self.pattern * pad, pad, path) return buildret - diff --git a/src/brand/bhyve/uefi/vars.py b/src/brand/bhyve/uefi/vars.py index fd3d0f93d..de218ef30 100644 --- a/src/brand/bhyve/uefi/vars.py +++ b/src/brand/bhyve/uefi/vars.py @@ -22,8 +22,8 @@ from . import align setGlobalPrintFullStrings(True) -#setGlobalPrintFalseFlags(True) -#setGlobalPrintPrivateEntries(True) +# setGlobalPrintFalseFlags(True) +# setGlobalPrintPrivateEntries(True) # The uefi-edk2 nvram firmware volume is divided into sections as follows: # @@ -34,24 +34,26 @@ # # This is valid for firmware generated with an FD_SIZE of 1024 or 2048. 
-VAR_STORE_VOLUME_SIZE = 0xe000 +VAR_STORE_VOLUME_SIZE = 0xE000 -VAR_STORE_FORMATTED = 0x5a -VAR_STORE_HEALTHY = 0xfe +VAR_STORE_FORMATTED = 0x5A +VAR_STORE_HEALTHY = 0xFE -VARIABLE_DATA = 0x55aa +VARIABLE_DATA = 0x55AA -VAR_ADDED = 0x3f -VAR_DELETED = 0xfd -VAR_IN_DELETED_TRANSITION = 0xfe -VAR_HEADER_VALID_ONLY = 0x7f +VAR_ADDED = 0x3F +VAR_DELETED = 0xFD +VAR_IN_DELETED_TRANSITION = 0xFE +VAR_HEADER_VALID_ONLY = 0x7F VAR_ADDED_TRANSITION = VAR_ADDED & VAR_IN_DELETED_TRANSITION VAR_DELETED_TRANSITION = VAR_ADDED & VAR_DELETED & VAR_IN_DELETED_TRANSITION GLOBAL_VARIABLE_GUID = "8be4df61-93ca-11d2-aa0d-00e098032b8c" -EfiGuid = Union(0, - "efiguid" / Struct( +EfiGuid = Union( + 0, + "efiguid" + / Struct( "data1" / Hex(Int32ul), "data2" / Hex(Int16ul), "data3" / Hex(Int16ul), @@ -68,11 +70,11 @@ "hour" / Int8ul, "min" / Int8ul, "sec" / Int8ul, - "_pad1" / Int8ul, # padding + "_pad1" / Int8ul, # padding "nanosec" / Int32ul, "tz" / Int16ul, "daylight" / Int8ul, - "_pad2" / Int8ul, # padding + "_pad2" / Int8ul, # padding ) BlockMapEntry = Struct( @@ -85,22 +87,24 @@ "size" / Int32ul, "format" / Const(VAR_STORE_FORMATTED, Int8ul), "state" / Const(VAR_STORE_HEALTHY, Int8ul), - "_rsvd1" / Int16ul, # reserved - "_rsvd1" / Int32ul, # reserved + "_rsvd1" / Int16ul, # reserved + "_rsvd1" / Int32ul, # reserved ) AuthVariable = Struct( "offset" / Hex(Tell), "startid" / Int16ul, "state" / Hex(Int8ul), - "_rsvd1" / Int8ul, # reserved - "attributes" / FlagsEnum(Int32ul, - EFI_VARIABLE_NON_VOLATILE = 0x00000001, - EFI_VARIABLE_BOOTSERVICE_ACCESS = 0x00000002, - EFI_VARIABLE_RUNTIME_ACCESS = 0x00000004, - EFI_VARIABLE_HARDWARE_ERROR_RECORD = 0x00000008, - EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS = 0x00000020, - EFI_VARIABLE_APPEND_WRITE = 0x00000040, + "_rsvd1" / Int8ul, # reserved + "attributes" + / FlagsEnum( + Int32ul, + EFI_VARIABLE_NON_VOLATILE=0x00000001, + EFI_VARIABLE_BOOTSERVICE_ACCESS=0x00000002, + EFI_VARIABLE_RUNTIME_ACCESS=0x00000004, + EFI_VARIABLE_HARDWARE_ERROR_RECORD=0x00000008, + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS=0x00000020, + EFI_VARIABLE_APPEND_WRITE=0x00000040, ), "count" / Int64ul, "timestamp" / EfiTime, @@ -109,7 +113,7 @@ "datalen" / Int32ul, "guid" / EfiGuid, "name" / CString("utf_16_le"), - "data" / align.Aligned(4, Bytes(this.datalen), pattern=b'\xff'), + "data" / align.Aligned(4, Bytes(this.datalen), pattern=b"\xff"), "next" / Peek(Int16ul), ) @@ -117,12 +121,12 @@ "zero_vector" / Array(2, Int64ul), "guid" / EfiGuid, "volsize" / Int64ul, - "signature" / Const(b'_FVH'), + "signature" / Const(b"_FVH"), "attributes" / Int32ul, "headerlen" / Int16ul, "checksum" / Int16ul, "ext_hdr_ofset" / Const(0, Int16ul), - "_rsvd1" / Int8ul, # reserved + "_rsvd1" / Int8ul, # reserved "revision" / Const(2, Int8ul), "maps" / RepeatUntil(obj_.num == 0 and obj_.len == 0, BlockMapEntry), "header" / VariableStoreHeader, @@ -141,19 +145,22 @@ ) BootEntry = Struct( - "attributes" / FlagsEnum(Int32ul, - LOAD_OPTION_ACTIVE = 0x00000001, - LOAD_OPTION_FORCE_RECONNECT = 0x00000002, - LOAD_OPTION_HIDDEN = 0x00000008, - LOAD_OPTION_CATEGORY_APP = 0x00000100, + "attributes" + / FlagsEnum( + Int32ul, + LOAD_OPTION_ACTIVE=0x00000001, + LOAD_OPTION_FORCE_RECONNECT=0x00000002, + LOAD_OPTION_HIDDEN=0x00000008, + LOAD_OPTION_CATEGORY_APP=0x00000100, ), "fplen" / Int16ul, "title" / CString("utf_16_le"), - "paths" / RepeatUntil(obj_.type == 0x7f and obj_.subtype == 0xff, - DevicePath), + "paths" + / RepeatUntil(obj_.type == 0x7F and obj_.subtype == 0xFF, DevicePath), "data" / 
GreedyRange(Byte), ) + class UEFIVars: path = None _data = None @@ -164,7 +171,7 @@ class UEFIVars: def __init__(self, path): self.path = path - with open(path, 'rb') as f: + with open(path, "rb") as f: self._data = f.read(VAR_STORE_VOLUME_SIZE) self.volume = Volume.parse(self._data) @@ -173,15 +180,14 @@ def __init__(self, path): self._parse_bootoptions() def _parse_bootoptions(self): - def be_index(x): - return int(x.name[4:], 16) if x.name.startswith('Boot0') else 255 + return int(x.name[4:], 16) if x.name.startswith("Boot0") else 255 def is_be(x): return ( - x.state == VAR_ADDED and - x.guid.str == GLOBAL_VARIABLE_GUID and - x.name.startswith('Boot0') + x.state == VAR_ADDED + and x.guid.str == GLOBAL_VARIABLE_GUID + and x.name.startswith("Boot0") ) fmap = {} @@ -190,15 +196,17 @@ def is_be(x): index = be_index(v) data = BootEntry.parse(v.data) - if (not data.attributes.LOAD_OPTION_ACTIVE or - data.attributes.LOAD_OPTION_HIDDEN): + if ( + not data.attributes.LOAD_OPTION_ACTIVE + or data.attributes.LOAD_OPTION_HIDDEN + ): continue guid = pci = path = None uri = False for p in data.paths: if p.type == 1 and p.subtype == 1 and p.datalen == 2: - pci = '{1}.{0}'.format(*p.data) + pci = "{1}.{0}".format(*p.data) if p.type == 4 and p.subtype == 6 and p.datalen == 16: # App, read GUID guid = EfiGuid.parse(p.data).str @@ -209,13 +217,13 @@ def is_be(x): entry = None if pci and uri: - entry = ('pci', pci, 'http') + entry = ("pci", pci, "http") elif pci: - entry = ('pci', pci) + entry = ("pci", pci) elif guid: - entry = ('app', guid) + entry = ("app", guid) elif path: - entry = ('path', path) + entry = ("path", path) paths.append(index) if entry: @@ -225,30 +233,30 @@ def is_be(x): self.bootrmap = {v: k for k, v in fmap.items()} for i in fmap.keys(): - self.bootrmap[('boot', i)] = i + self.bootrmap[("boot", i)] = i if paths: - self.bootrmap[('path',)] = paths[0] + self.bootrmap[("path",)] = paths[0] for i, pi in enumerate(paths): - self.bootrmap[('path', i)] = pi + self.bootrmap[("path", i)] = pi def print_vars(self): i = 0 for v in self.vars: if v.state == VAR_ADDED: - s = ' ' + s = " " else: - s = 'DEL' - print(f'[{i:2}] {s} {v.name} size {v.datalen}') + s = "DEL" + print(f"[{i:2}] {s} {v.name} size {v.datalen}") i += 1 def _find_var(self, name, guid): """Looks for a variable with the provided 'name' and 'guid'. - This function will return the active variable if it exists, - otherwise it will return the last found variable, which may be - in the deleted state. If no variable is found, a new one will - be created, initialised with defaults and added to the in-memory - variable list.""" + This function will return the active variable if it exists, + otherwise it will return the last found variable, which may be + in the deleted state. If no variable is found, a new one will + be created, initialised with defaults and added to the in-memory + variable list.""" last = None for v in self.vars: if v.name != name or v.guid.str != guid: @@ -262,13 +270,13 @@ def _find_var(self, name, guid): return last # Build new variable - v = AuthVariable.parse(b'\x00' * 0x100) - v.startid = VARIABLE_DATA; + v = AuthVariable.parse(b"\x00" * 0x100) + v.startid = VARIABLE_DATA v.state = VAR_ADDED v.attributes.EFI_VARIABLE_NON_VOLATILE = True v.attributes.EFI_VARIABLE_BOOTSERVICE_ACCESS = True v.attributes.EFI_VARIABLE_RUNTIME_ACCESS = True - v.name = name; + v.name = name v.namelen = (len(name) + 1) * 2 v.guid = EfiGuid.parse(UUID(guid).bytes_le) @@ -295,20 +303,26 @@ def defrag(self): # name. 
# Build a list of existing VAR_ADDED variables - added = [f'{v.guid.str}/{v.name}' - for v in self.vars if v.state == VAR_ADDED] - - vars = [v for v in self.vars - if v.state == VAR_ADDED or - (v.state == (VAR_ADDED & VAR_IN_DELETED_TRANSITION) and - f'{v.guid.str}/{v.name}' not in added)] + added = [ + f"{v.guid.str}/{v.name}" for v in self.vars if v.state == VAR_ADDED + ] + + vars = [ + v + for v in self.vars + if v.state == VAR_ADDED + or ( + v.state == (VAR_ADDED & VAR_IN_DELETED_TRANSITION) + and f"{v.guid.str}/{v.name}" not in added + ) + ] # Now promote any remaining ADDED/IN_DELETED_TRANSITION entries for v in vars: v.state = VAR_ADDED # Mark the new last element as the terminator - self.vars[-1].next = 0xffff + self.vars[-1].next = 0xFFFF self.volume.vars = ListContainer(vars) @@ -322,9 +336,12 @@ def write_volume(self, fh): def write(self, path=None): if not path: path = self.path - with tempfile.NamedTemporaryFile(mode='w+b', - dir=os.path.dirname(self.path), prefix='uefivars.', - delete=False) as fh: + with tempfile.NamedTemporaryFile( + mode="w+b", + dir=os.path.dirname(self.path), + prefix="uefivars.", + delete=False, + ) as fh: pad = self.write_volume(fh) if pad < 0: # Variable store overflow into event log section. @@ -332,8 +349,8 @@ def write(self, path=None): pad = self.write_volume(fh) if pad < 0: - raise OverflowError('Variable store too large') - fh.write(b'\xff' * pad) + raise OverflowError("Variable store too large") + fh.write(b"\xff" * pad) tf = fh.name @@ -351,20 +368,21 @@ def set_bootorder(self, options): if not len(order): raise KeyError - v = self._find_var('BootOrder', GLOBAL_VARIABLE_GUID) + v = self._find_var("BootOrder", GLOBAL_VARIABLE_GUID) v.state = VAR_ADDED v.data = Array(len(order), Int16ul).build(order) v.datalen = len(v.data) def set_bootnext(self, opt): - opt = self.bootrmap[opt] # can raise KeyError + opt = self.bootrmap[opt] # can raise KeyError - v = self._find_var('BootNext', GLOBAL_VARIABLE_GUID) + v = self._find_var("BootNext", GLOBAL_VARIABLE_GUID) v.state = VAR_ADDED v.data = Int16ul.build(opt) v.datalen = len(v.data) + # Vim hints # vim:ts=4:sw=4:et:fdm=marker diff --git a/src/brand/emu/boot.py b/src/brand/emu/boot.py index 16b4ca808..016009ed0 100755 --- a/src/brand/emu/boot.py +++ b/src/brand/emu/boot.py @@ -17,8 +17,17 @@ import getopt import bootlib -from bootlib import fatal, error, debug, info, warning, boolv, diskpath, \ - expandopts, collapseopts +from bootlib import ( + fatal, + error, + debug, + info, + warning, + boolv, + diskpath, + expandopts, + collapseopts, +) import logging import os import re @@ -32,7 +41,9 @@ from itertools import zip_longest from pprint import pprint, pformat -QEMUROOT = '/opt/ooce/qemu' +QEMUROOT = "/opt/ooce/qemu" + +# fmt: off # Default values opts = { @@ -68,102 +79,110 @@ } } +# fmt: on + ############################################################################## -testmode = False -jsonmode = False -name = None -xmlfile = None +testmode = False +jsonmode = False +name = None +xmlfile = None uc = ucred.get(os.getpid()) if not uc.has_priv("Effective", "sys_config"): testmode = True + def usage(msg=None): - print(''' + print( + """ boot [-t] [-x xml] <[-z] zone> -t Test mode - just show what would be done -j Output the computed zone data in JSON format -x Path to zone's XML file -z Name of zone -''') +""" + ) - if msg: print(msg) + if msg: + print(msg) sys.exit(2) + try: cliopts, args = getopt.getopt(sys.argv[1:], "jtx:z:") except getopt.GetoptError: usage() for opt, arg in cliopts: - if opt == 
'-t': + if opt == "-t": testmode = True - elif opt == '-j': + elif opt == "-j": jsonmode = True testmode = True bootlib.set_quiet(True) - elif opt == '-x': + elif opt == "-x": xmlfile = arg - elif opt == '-z': + elif opt == "-z": name = arg else: - fatal(f'unhandled option, {opt}') + fatal(f"unhandled option, {opt}") if not name and len(args): name = args.pop(0) if len(args): - usage('Unexpected arguments') + usage("Unexpected arguments") if not name: - usage('No zone name supplied') + usage("No zone name supplied") bootlib.log_stdout(logging.DEBUG) if not xmlfile: - xmlfile = f'/etc/zones/{name}.xml' + xmlfile = f"/etc/zones/{name}.xml" z = bootlib.Zone(xmlfile) if z.name != name: - fatal(f'Zone name {name} does not match XML file {z.name}') + fatal(f"Zone name {name} does not match XML file {z.name}") if not testmode and not os.path.isdir(z.zoneroot): - fatal(f'Could not find zone root {z.zoneroot}') + fatal(f"Could not find zone root {z.zoneroot}") if not testmode: try: - os.unlink(f'{z.zoneroot}/tmp/init.log') + os.unlink(f"{z.zoneroot}/tmp/init.log") except: pass - bootlib.log_file(f'{z.zonepath}/log/zone.log', logging.DEBUG) + bootlib.log_file(f"{z.zonepath}/log/zone.log", logging.DEBUG) -info(f'Zone name: {name}') +info(f"Zone name: {name}") ############################################################################## for tag in opts.keys(): z.parseopt(tag, opts, aliases) -for a in ['arch', 'cpu']: +for a in ["arch", "cpu"]: if not opts[a]: fatal(f'The "{a}" attribute is required') qemu = f'{QEMUROOT}/bin/qemu-system-{opts["arch"]}' if not os.path.exists(qemu): - fatal('{qemu} not found') + fatal("{qemu} not found") # UUID -uuid = opts['uuid'] if opts['uuid'] else z.uuid() -debug(f'Final uuid: {uuid}') +uuid = opts["uuid"] if opts["uuid"] else z.uuid() +debug(f"Final uuid: {uuid}") ############################################################################## -def add_disk(path, boot=False, intf=None, media='disk', index=-1): + +def add_disk(path, boot=False, intf=None, media="disk", index=-1): global args if not intf: - intf = opts['diskif'] + intf = opts["diskif"] if index < 0: index = add_disk.index @@ -172,182 +191,212 @@ def add_disk(path, boot=False, intf=None, media='disk', index=-1): add_disk.index = index if media == "cdrom": - args.extend(['-cdrom', path]) + args.extend(["-cdrom", path]) else: - node = f'{media}{index}' + node = f"{media}{index}" path = diskpath(path) - devstr = f'{intf},drive={node},serial={node}' - #if boot: + devstr = f"{intf},drive={node},serial={node}" + # if boot: # devstr += ',boot=on' - args.extend([ - '-blockdev', - f'driver=host_device,filename={path},node-name={node},discard=unmap', - '-device', devstr, - ]) + args.extend( + [ + "-blockdev", + f"driver=host_device,filename={path},node-name={node},discard=unmap", + "-device", + devstr, + ] + ) + + add_disk.index = 0 ############################################################################## args = [] -args.extend(['-name', name]) - -args.extend([ - '-L', f'{QEMUROOT}/share/qemu', - '-smp', opts['vcpus'], - '-m', opts['ram'], - '-rtc', opts['rtc'], - '-pidfile', '/tmp/vm.pid', - '-monitor', 'unix:/tmp/vm.monitor,server,nowait,nodelay', - '-cpu', opts['cpu'], -]) +args.extend(["-name", name]) + +args.extend( + [ + "-L", + f"{QEMUROOT}/share/qemu", + "-smp", + opts["vcpus"], + "-m", + opts["ram"], + "-rtc", + opts["rtc"], + "-pidfile", + "/tmp/vm.pid", + "-monitor", + "unix:/tmp/vm.monitor,server,nowait,nodelay", + "-cpu", + opts["cpu"], + ] +) ser = uuid -if boolv(opts['cloud-init'], 
'cloud-init', ignore=True) is not False: - if opts['cloud-init'].startswith(('http://', 'https://')): - ser = 'ds=nocloud-net;s={};i={}'.format(opts['cloud-init'], uuid) +if boolv(opts["cloud-init"], "cloud-init", ignore=True) is not False: + if opts["cloud-init"].startswith(("http://", "https://")): + ser = "ds=nocloud-net;s={};i={}".format(opts["cloud-init"], uuid) else: - z.build_cloudinit_image(uuid, opts['cloud-init'], testmode) - ser = f'ds=nocloud;i={uuid}' - add_disk('/cloud-init.iso', boot=False, intf='ide', media='cdrom') - -args.extend(['-smbios', - 'type=1,manufacturer={},product={},version={},serial={},uuid={},family={}' - .format('OmniOS', 'OmniOS HVM', '1.0', ser, uuid, 'Virtual Machine') -]) + z.build_cloudinit_image(uuid, opts["cloud-init"], testmode) + ser = f"ds=nocloud;i={uuid}" + add_disk("/cloud-init.iso", boot=False, intf="ide", media="cdrom") + +args.extend( + [ + "-smbios", + "type=1,manufacturer={},product={},version={},serial={},uuid={},family={}".format( + "OmniOS", "OmniOS HVM", "1.0", ser, uuid, "Virtual Machine" + ), + ] +) if uuid: - args.extend(['-uuid', uuid]) + args.extend(["-uuid", uuid]) # Console -args.extend([ - '-chardev', opts['console'], - '-serial', 'chardev:console0', -]); +args.extend( + [ + "-chardev", + opts["console"], + "-serial", + "chardev:console0", + ] +) # CDROM -for i, v in z.build_devlist('cdrom', 5): - add_disk(v, boot=False, intf='ide', media='cdrom') +for i, v in z.build_devlist("cdrom", 5): + add_disk(v, boot=False, intf="ide", media="cdrom") # If the disks are not using IDE, then reset their index as there is no need # to leave room for the CDROM. -if opts['diskif'] != 'ide': +if opts["diskif"] != "ide": add_disk.index = 0 # Network vlan = 0 -for f in z.findall('./network[@physical]'): - ifname = f.get('physical').strip() +for f in z.findall("./network[@physical]"): + ifname = f.get("physical").strip() mac = bootlib.get_mac(ifname) - if not mac: continue + if not mac: + continue - if opts['netif'] == 'e1000': + if opts["netif"] == "e1000": # Unlikely to be right yet - args.extend([ - '-net', - 'nic,name=net{2},model={0},macaddr={1}' - .format(opts['netif'], mac, vlan), - ]) + args.extend( + [ + "-net", + "nic,name=net{2},model={0},macaddr={1}".format( + opts["netif"], mac, vlan + ), + ] + ) else: - args.extend([ - '-device', - f'{opts["netif"]},netdev=net{vlan},mac={mac}' - ]) + args.extend(["-device", f'{opts["netif"]},netdev=net{vlan},mac={mac}']) - - args.extend([ - '-netdev', f'vnic,id=net{vlan},ifname={ifname}', - ]) + args.extend( + [ + "-netdev", + f"vnic,id=net{vlan},ifname={ifname}", + ] + ) vlan += 1 # Bootdisk try: - bootdisk = z.findattr('bootdisk') - add_disk(bootdisk.get('value').strip(), boot=True) + bootdisk = z.findattr("bootdisk") + add_disk(bootdisk.get("value").strip(), boot=True) except: pass # Additional Disks -for i, v in z.build_devlist('disk', 16): +for i, v in z.build_devlist("disk", 16): add_disk(v) # Display -if boolv(opts['vga'], 'vga', ignore=True) is False: - args.append('-nographic') -elif boolv(opts['vnc'], 'vnc', ignore=True) is False: - args.extend(['-display', 'none']) +if boolv(opts["vga"], "vga", ignore=True) is False: + args.append("-nographic") +elif boolv(opts["vnc"], "vnc", ignore=True) is False: + args.extend(["-display", "none"]) else: - args.extend(['-display', 'vnc=0']) - args.extend(['-vnc', opts['vnc']]) + args.extend(["-display", "vnc=0"]) + args.extend(["-vnc", opts["vnc"]]) # RNG -if boolv(opts['rng'], 'rng'): - args.extend([ - '-object', 'rng-builtin,id=random', - ]) +if 
boolv(opts["rng"], "rng"): + args.extend( + [ + "-object", + "rng-builtin,id=random", + ] + ) # Extra options -for i, v in z.build_devlist('extra', 16): +for i, v in z.build_devlist("extra", 16): args.extend(shlex.split(v)) ############################################################################## -debug(f'Final arguments:\n{qemu} {pformat(args)}') +debug(f"Final arguments:\n{qemu} {pformat(args)}") info(qemu) -info('{0}'.format(' '.join(map( - lambda s: f'"{s}"' if ' ' in s else s, args)))) +info("{0}".format(" ".join(map(lambda s: f'"{s}"' if " " in s else s, args)))) + def writecfg(fh, arg, nl=True): - end='\n' if nl else ' ' + end = "\n" if nl else " " if testmode: - print(arg, end=end) + print(arg, end=end) else: - fh.write(f'{arg}\n') + fh.write(f"{arg}\n") + fh = None if not testmode: try: - fh = tempfile.NamedTemporaryFile(mode='w', dir=f'{z.zoneroot}/etc', - prefix='emu.', delete=False) + fh = tempfile.NamedTemporaryFile( + mode="w", dir=f"{z.zoneroot}/etc", prefix="emu.", delete=False + ) except Exception as e: - fatal(f'Could not create temporary file: {e}') + fatal(f"Could not create temporary file: {e}") else: - debug(f'Created temporary file at {fh.name}') + debug(f"Created temporary file at {fh.name}") try: - os.unlink(f'{z.zoneroot}/qemu-system') + os.unlink(f"{z.zoneroot}/qemu-system") except: pass - os.symlink(qemu, f'{z.zoneroot}/qemu-system') + os.symlink(qemu, f"{z.zoneroot}/qemu-system") -writecfg(fh, '#\n# Generated from zone configuration\n#') +writecfg(fh, "#\n# Generated from zone configuration\n#") -for (arg, narg) in zip_longest(args, args[1:]): - writecfg(fh, arg, not narg or narg.startswith('-')) +for arg, narg in zip_longest(args, args[1:]): + writecfg(fh, arg, not narg or narg.startswith("-")) -#if vncpassword: +# if vncpassword: # writecfg(fh, f'pci.0.{VNC_SLOT}.0.password={vncpassword}') if not testmode: tf = fh.name fh.close() try: - os.rename(tf, f'{z.zoneroot}/etc/qemu.cfg') + os.rename(tf, f"{z.zoneroot}/etc/qemu.cfg") except Exception as e: - fatal(f'Could not create qemu.cfg from temporary file: {e}') + fatal(f"Could not create qemu.cfg from temporary file: {e}") else: - info(f'Successfully created {z.zoneroot}/etc/qemu.cfg') + info(f"Successfully created {z.zoneroot}/etc/qemu.cfg") # Vim hints # vim:ts=4:sw=4:et:fdm=marker diff --git a/src/brand/ipkg/fmri_compare.py b/src/brand/ipkg/fmri_compare.py index 742fb88cf..efb9fb27d 100644 --- a/src/brand/ipkg/fmri_compare.py +++ b/src/brand/ipkg/fmri_compare.py @@ -28,36 +28,40 @@ import pkg.fmri import sys + def usage(): - print("usage: %s ".format(sys.argv[0]), file=sys.stderr) - sys.exit(2) + print("usage: %s ".format(sys.argv[0]), file=sys.stderr) + sys.exit(2) + if len(sys.argv) != 3: - usage() + usage() try: - x = pkg.fmri.PkgFmri(sys.argv[1]) - y = pkg.fmri.PkgFmri(sys.argv[2]) + x = pkg.fmri.PkgFmri(sys.argv[1]) + y = pkg.fmri.PkgFmri(sys.argv[2]) except pkg.fmri.FmriError as e: - print ("error: %s" % str(e) , file=sys.stderr) - sys.exit(1) + print("error: %s" % str(e), file=sys.stderr) + sys.exit(1) if not x.is_same_pkg(y): - print ("error: can only compare two versions of the same package.", - file=sys.stderr) - sys.exit(1) + print( + "error: can only compare two versions of the same package.", + file=sys.stderr, + ) + sys.exit(1) if x < y: - print("<") + print("<") elif x > y: - print(">") + print(">") elif x == y: - print("=") + print("=") else: - print ("panic", file=sys.stderr) - sys.exit(1) + print("panic", file=sys.stderr) + sys.exit(1) sys.exit(0) # Vim hints -# 
vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/brand/kvm/init.py b/src/brand/kvm/init.py index 6c33edffa..7b6780e9c 100755 --- a/src/brand/kvm/init.py +++ b/src/brand/kvm/init.py @@ -19,8 +19,8 @@ import xml.etree.ElementTree as etree from pprint import pprint, pformat -zonexml = '/etc/zone.xml' -uuidfile = '/etc/uuid' +zonexml = "/etc/zone.xml" +uuidfile = "/etc/uuid" testmode = False try: @@ -29,18 +29,21 @@ print("init [-t] [-x ] [-u ]") sys.exit(2) for opt, arg in opts: - if opt == '-t': + if opt == "-t": testmode = True - elif opt == '-x': + elif opt == "-x": zonexml = arg - elif opt == '-u': + elif opt == "-u": uuidfile = arg if testmode: logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) else: - logging.basicConfig(filename='/tmp/init.log', filemode='a', - level=logging.DEBUG) + logging.basicConfig( + filename="/tmp/init.log", filemode="a", level=logging.DEBUG + ) + +# fmt: off # Default values opts = { @@ -70,79 +73,90 @@ } } +# fmt: on + try: with open(uuidfile) as file: uuid = file.read().strip() - logging.info('Zone UUID: {0}'.format(uuid)) + logging.info("Zone UUID: {0}".format(uuid)) except: uuid = None try: cfg = etree.parse(zonexml) except: - logging.error('Could not parse {0}'.format(zonexml)) + logging.error("Could not parse {0}".format(zonexml)) sys.exit(1) root = cfg.getroot() -logging.info('Parsed {0}'.format(zonexml)) +logging.info("Parsed {0}".format(zonexml)) ############################################################################## + def fatal(msg): logging.error(msg) print(msg, file=sys.stderr) sys.exit(1) + def boolv(s, var, ignore=False): - if s in ['true', 'yes', 'on', '1']: + if s in ["true", "yes", "on", "1"]: return True - if s in ['false', 'no', 'off', '0']: + if s in ["false", "no", "off", "0"]: return False if not ignore: - fatal(f'Invalid value {s} for boolean variable {var}') + fatal(f"Invalid value {s} for boolean variable {var}") return None + def opt(tag): global opts, root try: el = root.find('./attr[@name="{0}"]'.format(tag)) - opts[tag] = el.get('value').strip() - logging.debug('Found custom {0} attribute - "{1}"' - .format(tag, opts[tag])) + opts[tag] = el.get("value").strip() + logging.debug( + 'Found custom {0} attribute - "{1}"'.format(tag, opts[tag]) + ) if tag in aliases: val = opts[tag] if (bt := boolv(val, tag, ignore=True)) is not None: - val = 'on' if bt else 'off' + val = "on" if bt else "off" try: opts[tag] = aliases[tag][val] - logging.debug(' Alias expanded to {0}'.format(opts[tag])) + logging.debug(" Alias expanded to {0}".format(opts[tag])) except KeyError: pass except: pass + def diskpath(arg): - if arg.startswith('/dev'): + if arg.startswith("/dev"): return arg - return '/dev/zvol/rdsk/{0}'.format(arg) + return "/dev/zvol/rdsk/{0}".format(arg) + for tag in opts.keys(): opt(tag) + # Look for attributes of the form or N and generate a list. 
def build_devlist(type, maxval): devlist = {} - for dev in root.findall('./attr[@name]'): - m = re.search(rf'^{type}(\d+)$', dev.get('name').strip()) - if not m: continue + for dev in root.findall("./attr[@name]"): + m = re.search(rf"^{type}(\d+)$", dev.get("name").strip()) + if not m: + continue k = int(m.group(1)) if k in devlist: logging.error( - '{}{} appears more than once in configuration'.format(type, k)) + "{}{} appears more than once in configuration".format(type, k) + ) sys.exit(1) - if (k > maxval): - logging.error('{}{} slot out of range'.format(type, k)) + if k > maxval: + logging.error("{}{} slot out of range".format(type, k)) sys.exit(1) - devlist[k] = dev.get('value').strip() + devlist[k] = dev.get("value").strip() # Now insert plain tags into the list, using available slots in order avail = sorted(set(range(0, maxval)).difference(sorted(devlist.keys()))) @@ -150,46 +164,60 @@ def build_devlist(type, maxval): try: k = avail.pop(0) except IndexError: - logging.error('{}: no more available slots'.format(type)) + logging.error("{}: no more available slots".format(type)) sys.exit(1) - devlist[k] = dev.get('value').strip() + devlist[k] = dev.get("value").strip() - logging.debug('{} list: \n{}'.format(type, pformat(devlist))) + logging.debug("{} list: \n{}".format(type, pformat(devlist))) return devlist + ############################################################################## -args = ['/usr/bin/qemu-system-x86_64'] +args = ["/usr/bin/qemu-system-x86_64"] -name = root.get('name') -args.extend(['-name', name]) +name = root.get("name") +args.extend(["-name", name]) if uuid: - args.extend(['-uuid', uuid]) - -args.extend([ - '-enable-kvm', - '-no-hpet', - '-m', opts['ram'], - '-smp', opts['vcpus'], - '-cpu', opts['cpu'], - '-rtc', opts['rtc'], - '-pidfile', '/tmp/vm.pid', - '-monitor', 'unix:/tmp/vm.monitor,server,nowait,nodelay', - '-vga', 'std', - '-chardev', opts['console'], - '-serial', 'chardev:console0', - '-boot', 'order={0}'.format(opts['bootorder']), -]) + args.extend(["-uuid", uuid]) + +args.extend( + [ + "-enable-kvm", + "-no-hpet", + "-m", + opts["ram"], + "-smp", + opts["vcpus"], + "-cpu", + opts["cpu"], + "-rtc", + opts["rtc"], + "-pidfile", + "/tmp/vm.pid", + "-monitor", + "unix:/tmp/vm.monitor,server,nowait,nodelay", + "-vga", + "std", + "-chardev", + opts["console"], + "-serial", + "chardev:console0", + "-boot", + "order={0}".format(opts["bootorder"]), + ] +) # Disks -def add_disk(path, boot=False, intf=None, media='disk', index=-1): + +def add_disk(path, boot=False, intf=None, media="disk", index=-1): global args if not intf: - intf = opts['diskif'] + intf = opts["diskif"] if index < 0: index = add_disk.index @@ -197,99 +225,111 @@ def add_disk(path, boot=False, intf=None, media='disk', index=-1): elif index > add_disk.index: add_disk.index = index - if media == "disk": path = diskpath(path) - str = ( - 'file={0},if={1},media={2},index={3},cache=none' - .format(path, intf, media, index) + if media == "disk": + path = diskpath(path) + str = "file={0},if={1},media={2},index={3},cache=none".format( + path, intf, media, index ) - if ',serial=' not in str: - str += ',serial={}{}'.format(media, index) + if ",serial=" not in str: + str += ",serial={}{}".format(media, index) if boot: - str += ',boot=on' - args.extend(['-drive', str]) + str += ",boot=on" + args.extend(["-drive", str]) + + add_disk.index = 0 -for i, v in build_devlist('cdrom', 5).items(): - add_disk(v, boot=False, intf='ide', media='cdrom') +for i, v in build_devlist("cdrom", 5).items(): + add_disk(v, 
boot=False, intf="ide", media="cdrom") # If the disks are not using IDE, then reset their index as there is no need # to leave room for the CDROM. -if opts['diskif'] != 'ide': +if opts["diskif"] != "ide": add_disk.index = 0 try: bootdisk = root.find('./attr[@name="bootdisk"]') - add_disk(bootdisk.get('value').strip(), boot=True) + add_disk(bootdisk.get("value").strip(), boot=True) except: pass -for i, v in build_devlist('disk', 10).items(): +for i, v in build_devlist("disk", 10).items(): add_disk(v) # Network + def get_mac(ifname): - ret = subprocess.run(['/usr/sbin/dladm', 'show-vnic', - '-p', '-o', 'macaddress', ifname], stdout=subprocess.PIPE) - mac = ret.stdout.decode('utf-8').strip() + ret = subprocess.run( + ["/usr/sbin/dladm", "show-vnic", "-p", "-o", "macaddress", ifname], + stdout=subprocess.PIPE, + ) + mac = ret.stdout.decode("utf-8").strip() # Need to zero-pad the bytes - return ':'.join(l.zfill(2) for l in mac.split(':')) + return ":".join(l.zfill(2) for l in mac.split(":")) + vlan = 0 -for f in root.findall('./network[@physical]'): - ifname = f.get('physical').strip() +for f in root.findall("./network[@physical]"): + ifname = f.get("physical").strip() mac = get_mac(ifname) - if not len(mac): continue + if not len(mac): + continue - if opts['netif'] == 'e1000': + if opts["netif"] == "e1000": # -net nic,vlan=0,name=net0,model=e1000,macaddr=00:.. - args.extend([ - '-net', - 'nic,vlan={2},name=net{2},model={0},macaddr={1}' - .format(opts['netif'], mac, vlan), - ]) + args.extend( + [ + "-net", + "nic,vlan={2},name=net{2},model={0},macaddr={1}".format( + opts["netif"], mac, vlan + ), + ] + ) else: - # -device ,mac=00:.,tx=timer,x-txtimer=200000,x-txburst=128,vlan=0 - args.extend([ - '-device', - '{0},mac={1},tx=timer,x-txtimer=200000,x-txburst=128,vlan={2}' - .format(opts['netif'], mac, vlan), - ]) - + # -device ,mac=00:.,tx=timer,x-txtimer=200000,x-txburst=128,vlan=0 + args.extend( + [ + "-device", + "{0},mac={1},tx=timer,x-txtimer=200000,x-txburst=128,vlan={2}".format( + opts["netif"], mac, vlan + ), + ] + ) # -net vnic,vlan=0,name=net0,ifname=vnic22 - args.extend([ - '-net', - 'vnic,vlan={0},name=net{0},ifname={1}' - .format(vlan, ifname) - ]) + args.extend( + ["-net", "vnic,vlan={0},name=net{0},ifname={1}".format(vlan, ifname)] + ) vlan += 1 # VNC -args.extend(['-vnc', opts['vnc']]) +args.extend(["-vnc", opts["vnc"]]) # Extra options -for i, v in build_devlist('extra', 16): +for i, v in build_devlist("extra", 16): args.extend(shlex.split(v)) -logging.info('Final arguments: {0}'.format(pformat(args))) -logging.info('{0}'.format(' '.join(args))) +logging.info("Final arguments: {0}".format(pformat(args))) +logging.info("{0}".format(" ".join(args))) if testmode: sys.exit(0) errcnt = 0 while errcnt < 10: - logging.info('Starting kvm') + logging.info("Starting kvm") ret = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) logging.info("KVM exited {0}".format(ret.returncode)) logging.error("Error {0}".format(ret.stderr)) logging.debug("Output {0}".format(ret.stdout)) - if ret.returncode == 0: break - if ret.returncode == 1: errcnt += 1 + if ret.returncode == 0: + break + if ret.returncode == 1: + errcnt += 1 # Vim hints # vim:ts=4:sw=4:et:fdm=marker diff --git a/src/cffi_src/build_arch.py b/src/cffi_src/build_arch.py index 49c33dfba..4aaa32768 100755 --- a/src/cffi_src/build_arch.py +++ b/src/cffi_src/build_arch.py @@ -29,13 +29,17 @@ ffi = FFI() -ffi.set_source("_arch", """ +ffi.set_source( + "_arch", + """ /* Includes */ #include #include -""") +""", +) 
-ffi.cdef(""" +ffi.cdef( + """ /* Macros */ #define SI_RELEASE 3 /* return release of operating system */ #define SI_ARCHITECTURE_32 516 /* basic 32-bit SI_ARCHITECTURE */ @@ -47,10 +51,11 @@ void *realloc(void *, size_t); void free(void *); int sysinfo(int, char *, long); -""") +""" +) if __name__ == "__main__": - ffi.emit_c_code("cffi_src/_arch.c") + ffi.emit_c_code("cffi_src/_arch.c") # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/cffi_src/build_pspawn.py b/src/cffi_src/build_pspawn.py index 125509eaa..df175c5a2 100755 --- a/src/cffi_src/build_pspawn.py +++ b/src/cffi_src/build_pspawn.py @@ -29,7 +29,9 @@ ffi = FFI() -ffi.set_source("_pspawn", """ +ffi.set_source( + "_pspawn", + """ /* Includes */ #include #include @@ -40,9 +42,11 @@ int start_fd; posix_spawn_file_actions_t *fap; } walk_data; -""") +""", +) -ffi.cdef(""" +ffi.cdef( + """ /* Types */ typedef int... mode_t; /* file attribute type */ typedef int... pid_t; /* process id type */ @@ -77,10 +81,11 @@ const posix_spawnattr_t *, char *const [], char *const []); -""") +""" +) if __name__ == "__main__": - ffi.emit_c_code("cffi_src/_pspawn.c") + ffi.emit_c_code("cffi_src/_pspawn.c") # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/cffi_src/build_sha512_t.py b/src/cffi_src/build_sha512_t.py index 095f9f10c..836412a72 100755 --- a/src/cffi_src/build_sha512_t.py +++ b/src/cffi_src/build_sha512_t.py @@ -30,13 +30,17 @@ ffi = FFI() -ffi.set_source("_sha512_t", """ +ffi.set_source( + "_sha512_t", + """ /* Includes */ #include #include -""") +""", +) -ffi.cdef(""" +ffi.cdef( + """ #define SHA512_224 9 #define SHA512_256 10 @@ -63,10 +67,11 @@ void SHA2Update(SHA2_CTX *ctx, const void *buf, size_t bufsz); void SHA2Final(void *digest, SHA2_CTX *ctx); void *memcpy(void *restrict s1, const void *restrict s2, size_t n); -""") +""" +) if __name__ == "__main__": - ffi.emit_c_code("cffi_src/_sha512_t.c") + ffi.emit_c_code("cffi_src/_sha512_t.c") # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/cffi_src/build_sysattr.py b/src/cffi_src/build_sysattr.py index 31eb7976a..54d977dc5 100755 --- a/src/cffi_src/build_sysattr.py +++ b/src/cffi_src/build_sysattr.py @@ -29,16 +29,20 @@ ffi = FFI() -ffi.set_source("_sysattr", """ +ffi.set_source( + "_sysattr", + """ /* Includes */ #include #include #include #include #include -""") +""", +) -ffi.cdef(""" +ffi.cdef( + """ /* Macros */ #define NV_UNIQUE_NAME 0x1 @@ -143,10 +147,11 @@ char *nvpair_name(nvpair_t *); data_type_t nvpair_type(nvpair_t *); int nvpair_value_boolean_value(nvpair_t *, boolean_t *); -""") +""" +) if __name__ == "__main__": - ffi.emit_c_code("cffi_src/_sysattr.c") + ffi.emit_c_code("cffi_src/_sysattr.c") # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/cffi_src/build_syscallat.py b/src/cffi_src/build_syscallat.py index f51e02188..7ae144e02 100755 --- a/src/cffi_src/build_syscallat.py +++ b/src/cffi_src/build_syscallat.py @@ -29,15 +29,19 @@ ffi = FFI() -ffi.set_source("_syscallat", """ +ffi.set_source( + "_syscallat", + """ /* Includes */ #include #include #include #include -""") +""", +) -ffi.cdef(""" +ffi.cdef( + """ /* Types */ typedef int... 
mode_t; /* file attribute type */ @@ -46,10 +50,11 @@ int openat(int, const char *, int, mode_t); int renameat(int, const char *, int, const char *); int unlinkat(int, const char *, int); -""") +""" +) if __name__ == "__main__": - ffi.emit_c_code("cffi_src/_syscallat.c") + ffi.emit_c_code("cffi_src/_syscallat.c") # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/client.py b/src/client.py index a36b6d587..e5d7b82f8 100755 --- a/src/client.py +++ b/src/client.py @@ -49,75 +49,88 @@ import sys if sys.platform == "sunos5": - print(""" + print( + """ The Python environment on this system is damaged and missing a critical core component (pkg.site_paths) and can not be repaired with pkg(1). To recover this system reboot and select an alternate Boot Environment (BE) from the boot menu. From the alternate BE mount and run 'pkg fix' on this BE. -""") +""" + ) sys.exit(1) pkg.site_paths.init() try: - import calendar - import collections - import datetime - import errno - import getopt - import gettext - import itertools - import locale - import logging - import os - import re - import six - import socket - import sys - import tempfile - import textwrap - import time - import traceback - import pycurl - import atexit - import shutil - from six.moves.urllib.parse import urlparse, unquote - - import pkg - import pkg.actions as actions - import pkg.client.api as api - import pkg.client.api_errors as api_errors - import pkg.client.bootenv as bootenv - import pkg.client.client_api as client_api - import pkg.client.progress as progress - import pkg.client.linkedimage as li - import pkg.client.publisher as publisher - import pkg.client.transport.transport as transport - import pkg.client.options as options - import pkg.fmri as fmri - import pkg.json as json - import pkg.misc as misc - import pkg.pipeutils as pipeutils - import pkg.portable as portable - import pkg.version as version - - from importlib import reload - from pkg.client import global_settings - from pkg.client.api import (IMG_TYPE_ENTIRE, IMG_TYPE_PARTIAL, - IMG_TYPE_USER, RESULT_CANCELED, RESULT_FAILED_BAD_REQUEST, - RESULT_FAILED_CONFIGURATION, RESULT_FAILED_CONSTRAINED, - RESULT_FAILED_LOCKED, RESULT_FAILED_STORAGE, RESULT_NOTHING_TO_DO, - RESULT_SUCCEEDED, RESULT_FAILED_TRANSPORT, RESULT_FAILED_UNKNOWN, - RESULT_FAILED_OUTOFMEMORY, RESULT_FAILED_DISKSPACE) - from pkg.client.debugvalues import DebugValues - from pkg.client.pkgdefs import * - from pkg.misc import EmptyI, msg, emsg, PipeError - from pkg.client.plandesc import (OP_STAGE_PREP, OP_STAGE_EXEC, - OP_STAGE_PLAN) + import calendar + import collections + import datetime + import errno + import getopt + import gettext + import itertools + import locale + import logging + import os + import re + import six + import socket + import sys + import tempfile + import textwrap + import time + import traceback + import pycurl + import atexit + import shutil + from six.moves.urllib.parse import urlparse, unquote + + import pkg + import pkg.actions as actions + import pkg.client.api as api + import pkg.client.api_errors as api_errors + import pkg.client.bootenv as bootenv + import pkg.client.client_api as client_api + import pkg.client.progress as progress + import pkg.client.linkedimage as li + import pkg.client.publisher as publisher + import pkg.client.transport.transport as transport + import pkg.client.options as options + import pkg.fmri as fmri + import pkg.json as json + import pkg.misc as misc + import pkg.pipeutils as pipeutils + import 
pkg.portable as portable + import pkg.version as version + + from importlib import reload + from pkg.client import global_settings + from pkg.client.api import ( + IMG_TYPE_ENTIRE, + IMG_TYPE_PARTIAL, + IMG_TYPE_USER, + RESULT_CANCELED, + RESULT_FAILED_BAD_REQUEST, + RESULT_FAILED_CONFIGURATION, + RESULT_FAILED_CONSTRAINED, + RESULT_FAILED_LOCKED, + RESULT_FAILED_STORAGE, + RESULT_NOTHING_TO_DO, + RESULT_SUCCEEDED, + RESULT_FAILED_TRANSPORT, + RESULT_FAILED_UNKNOWN, + RESULT_FAILED_OUTOFMEMORY, + RESULT_FAILED_DISKSPACE, + ) + from pkg.client.debugvalues import DebugValues + from pkg.client.pkgdefs import * + from pkg.misc import EmptyI, msg, emsg, PipeError + from pkg.client.plandesc import OP_STAGE_PREP, OP_STAGE_EXEC, OP_STAGE_PLAN except KeyboardInterrupt: - import sys - sys.exit(1) + import sys + + sys.exit(1) CLIENT_API_VERSION = 83 PKG_CLIENT_NAME = "pkg" @@ -139,388 +152,452 @@ default_attrs = {} for atype, aclass in six.iteritems(actions.types): - default_attrs[atype] = [aclass.key_attr] - if atype == "depend": - default_attrs[atype].insert(0, "type") - if atype == "set": - default_attrs[atype].append("value") + default_attrs[atype] = [aclass.key_attr] + if atype == "depend": + default_attrs[atype].insert(0, "type") + if atype == "set": + default_attrs[atype].append("value") _api_inst = None tmpdirs = [] tmpfiles = [] + @atexit.register def cleanup(): - """To be called at program finish.""" - for d in tmpdirs: - shutil.rmtree(d, True) - for f in tmpfiles: - os.unlink(f) + """To be called at program finish.""" + for d in tmpdirs: + shutil.rmtree(d, True) + for f in tmpfiles: + os.unlink(f) + def notes_block(release_url=None): - url = "https://omnios.org/" - if release_url is None: - release_url = misc.get_release_notes_url() - msg("\n" + "-" * 79) - msg("{:42} {}".format("Find release notes:", release_url)) - msg("-" * 79) - msg("{:42} {}{}".format("Get a support contract:", url, "support")) - msg("{:42} {}{}".format("Sponsor OmniOS development:", url, "patron")) - msg("{:42} {}{}".format("Contribute to OmniOS:", url, "joinus")) - msg("-" * 79 + "\n") + url = "https://omnios.org/" + if release_url is None: + release_url = misc.get_release_notes_url() + msg("\n" + "-" * 79) + msg("{:42} {}".format("Find release notes:", release_url)) + msg("-" * 79) + msg("{:42} {}{}".format("Get a support contract:", url, "support")) + msg("{:42} {}{}".format("Sponsor OmniOS development:", url, "patron")) + msg("{:42} {}{}".format("Contribute to OmniOS:", url, "joinus")) + msg("-" * 79 + "\n") + def format_update_error(e): - # This message is displayed to the user whenever an - # ImageFormatUpdateNeeded exception is encountered. - logger.error("\n") - logger.error(str(e)) - logger.error(_("To continue, execute 'pkg update-format' as a " + # This message is displayed to the user whenever an + # ImageFormatUpdateNeeded exception is encountered. + logger.error("\n") + logger.error(str(e)) + logger.error( + _( + "To continue, execute 'pkg update-format' as a " "privileged user and then try again. Please note that updating " "the format of the image will render it unusable with older " - "versions of the pkg(7) system.")) - -def error(text, cmd=None): - """Emit an error message prefixed by the command name """ - - if not isinstance(text, six.string_types): - # Assume it's an object that can be stringified. - text = str(text) - - # If the message starts with whitespace, assume that it should come - # *before* the command-name prefix. 
- text_nows = text.lstrip() - ws = text[:len(text) - len(text_nows)] - - if cmd: - text_nows = "{0}: {1}".format(cmd, text_nows) - pkg_cmd = "pkg " - else: - pkg_cmd = "pkg: " - - # This has to be a constant value as we can't reliably get our actual - # program name on all platforms. - logger.error(ws + pkg_cmd + text_nows) - -def usage(usage_error=None, cmd=None, retcode=EXIT_BADOPT, full=False, - verbose=False, unknown_cmd=None): - """Emit a usage message and optionally prefix it with a more - specific error message. Causes program to exit. """ - - if usage_error: - error(usage_error, cmd=cmd) - - basic_usage = {} - adv_usage = {} - priv_usage = {} - - basic_cmds = ["refresh", "install", "uninstall", "update", - "apply-hot-fix", "autoremove", "list", "version"] - - beopts = ( - " [--no-be-activate] [--temp-be-activate]\n" - " [--no-backup-be | --require-backup-be] [--backup-be-name name]\n" - " [--deny-new-be | --require-new-be] [--be-name name]\n" - ) - - recurseopts = ( - " [-R | -r [-z zonename... | -Z zonename...]]\n" + "versions of the pkg(7) system." ) + ) - # For those operations that do not recurse by default, even with - # the default-recurse image property set to true, -R makes no sense - recurseoptsnoR = ( - " [-r [-z zonename... | -Z zonename...]]\n" - ) - basic_usage["install"] = _( - "[-nvq] [-C n] [-g path_or_uri ...]\n" - " [--no-index] [--no-refresh] [--licenses] [--accept]\n" - + beopts + recurseoptsnoR + - " [--sync-actuators | --sync-actuators-timeout timeout]\n" - " [--reject pkg_fmri_pattern ... ] pkg_fmri_pattern ...") - basic_usage["uninstall"] = _( - "[-nvq] [-C n] [--ignore-missing] [--no-index]\n" - + beopts + recurseoptsnoR + - " [--sync-actuators | --sync-actuators-timeout timeout]\n" - " pkg_fmri_pattern ...") - basic_usage["update"] = _( - "[-fnvq] [-C n] [-g path_or_uri ...] [--ignore-missing]\n" - " [--no-index] [--no-refresh] [--licenses] [--accept]\n" - + beopts + recurseopts + - " [--sync-actuators | --sync-actuators-timeout timeout]\n" - " [--reject pkg_fmri_pattern ...] [pkg_fmri_pattern ...]") - basic_usage["apply-hot-fix"] = _( - "[-nvq]\n" - + beopts + recurseopts + - " [pkg_fmri_pattern ...]") - - basic_usage["autoremove"] = _( - "[-nvq] [-C n] [--ignore-missing] [--no-index]\n" - + beopts + recurseoptsnoR + - " [--sync-actuators | --sync-actuators-timeout timeout]" - ) +def error(text, cmd=None): + """Emit an error message prefixed by the command name""" + + if not isinstance(text, six.string_types): + # Assume it's an object that can be stringified. + text = str(text) + + # If the message starts with whitespace, assume that it should come + # *before* the command-name prefix. + text_nows = text.lstrip() + ws = text[: len(text) - len(text_nows)] + + if cmd: + text_nows = "{0}: {1}".format(cmd, text_nows) + pkg_cmd = "pkg " + else: + pkg_cmd = "pkg: " + + # This has to be a constant value as we can't reliably get our actual + # program name on all platforms. + logger.error(ws + pkg_cmd + text_nows) + + +def usage( + usage_error=None, + cmd=None, + retcode=EXIT_BADOPT, + full=False, + verbose=False, + unknown_cmd=None, +): + """Emit a usage message and optionally prefix it with a more + specific error message. 
Causes program to exit.""" + + if usage_error: + error(usage_error, cmd=cmd) + + basic_usage = {} + adv_usage = {} + priv_usage = {} + + basic_cmds = [ + "refresh", + "install", + "uninstall", + "update", + "apply-hot-fix", + "autoremove", + "list", + "version", + ] + + beopts = ( + " [--no-be-activate] [--temp-be-activate]\n" + " [--no-backup-be | --require-backup-be] [--backup-be-name name]\n" + " [--deny-new-be | --require-new-be] [--be-name name]\n" + ) + + recurseopts = " [-R | -r [-z zonename... | -Z zonename...]]\n" + + # For those operations that do not recurse by default, even with + # the default-recurse image property set to true, -R makes no sense + recurseoptsnoR = " [-r [-z zonename... | -Z zonename...]]\n" + + basic_usage["install"] = _( + "[-nvq] [-C n] [-g path_or_uri ...]\n" + " [--no-index] [--no-refresh] [--licenses] [--accept]\n" + + beopts + + recurseoptsnoR + + " [--sync-actuators | --sync-actuators-timeout timeout]\n" + " [--reject pkg_fmri_pattern ... ] pkg_fmri_pattern ..." + ) + basic_usage["uninstall"] = _( + "[-nvq] [-C n] [--ignore-missing] [--no-index]\n" + + beopts + + recurseoptsnoR + + " [--sync-actuators | --sync-actuators-timeout timeout]\n" + " pkg_fmri_pattern ..." + ) + basic_usage["update"] = _( + "[-fnvq] [-C n] [-g path_or_uri ...] [--ignore-missing]\n" + " [--no-index] [--no-refresh] [--licenses] [--accept]\n" + + beopts + + recurseopts + + " [--sync-actuators | --sync-actuators-timeout timeout]\n" + " [--reject pkg_fmri_pattern ...] [pkg_fmri_pattern ...]" + ) + basic_usage["apply-hot-fix"] = _( + "[-nvq]\n" + + beopts + + recurseopts + + " [pkg_fmri_pattern ...]" + ) + + basic_usage["autoremove"] = _( + "[-nvq] [-C n] [--ignore-missing] [--no-index]\n" + + beopts + + recurseoptsnoR + + " [--sync-actuators | --sync-actuators-timeout timeout]" + ) + + basic_usage["list"] = _( + "[-HafiMmnqRrsuv] [-g path_or_uri ...] [--no-refresh]\n" + " [-F format] [-o column[,column]]\n" + " [pkg_fmri_pattern ...]" + ) + basic_usage["refresh"] = _("[-q] [--full] [publisher ...]") + basic_usage["version"] = "" + + advanced_cmds = [ + "info", + "contents", + "search", + "", + "verify", + "fix", + "revert", + "", + "mediator", + "set-mediator", + "unset-mediator", + "", + "variant", + "change-variant", + "", + "facet", + "change-facet", + "", + "avoid", + "unavoid", + "", + "freeze", + "unfreeze", + "", + "property", + "set-property", + "add-property-value", + "remove-property-value", + "unset-property", + "", + "publisher", + "set-publisher", + "unset-publisher", + "", + "history", + "purge-history", + "", + "rebuild-index", + "update-format", + "image-create", + "exact-install", + "", + "dehydrate", + "rehydrate", + "", + "flag", + "clean", + ] + + adv_usage["info"] = _( + "[-lqr] [-g path_or_uri ...] [--license] [pkg_fmri_pattern ...]" + ) + adv_usage["contents"] = _( + "[-Hmr] [-a attribute=pattern]... [-g path_or_uri]...\n" + " [-o attribute[,attribute]...]... [-s sort_key]\n" + " [-t action_name[,action_name]...]...\n" + " [pkg_fmri_pattern ...]" + ) + adv_usage["search"] = _("[-HIaflpr] [-o attribute ...] [-s repo_uri] query") + + adv_usage["verify"] = _( + "[-Hqv] [-p path]... [--parsable version]\n" + " [--unpackaged] [--unpackaged-only] [pkg_fmri_pattern ...]" + ) + adv_usage["fix"] = _( + "[-nvq]\n" + + beopts + + " [--accept] [--licenses] [--parsable version] [--unpackaged]\n" + " [pkg_fmri_pattern ...]" + ) + adv_usage["revert"] = _( + "[-nv]\n" + + beopts + + " (--tagged tag-name ... 
| path-to-file ...)" + ) + + adv_usage["image-create"] = _( + "[-FPUfz] [--force] [--full|--partial|--user] [--zone]\n" + " [-k ssl_key] [-c ssl_cert] [--no-refresh]\n" + " [--variant = ...]\n" + " [-g uri|--origin=uri ...] [-m uri|--mirror=uri ...]\n" + " [--facet =(True|False) ...]\n" + " [(-p|--publisher) [=]] dir" + ) + adv_usage["change-variant"] = _( + "[-nvq] [-C n] [-g path_or_uri ...] [--accept]\n" + " [--licenses] [--no-index] [--no-refresh]\n" + + beopts + + recurseopts + + " [--sync-actuators | --sync-actuators-timeout timeout]\n" + " [--reject pkg_fmri_pattern ... ]\n" + " = ..." + ) + + adv_usage["change-facet"] = _( + "[-nvq] [-C n] [-g path_or_uri ...] [--accept]\n" + " [--licenses] [--no-index] [--no-refresh]\n" + + beopts + + recurseopts + + " [--sync-actuators | --sync-actuators-timeout timeout]\n" + " [--reject pkg_fmri_pattern ... ]\n" + " =[True|False|None] ..." + ) + + adv_usage["mediator"] = _("[-aH] [-F format] [ ...]") + adv_usage["set-mediator"] = _( + "[-nv] [-I ]\n" + " [-V ]\n" + beopts + " ..." + ) + adv_usage["unset-mediator"] = _( + "[-nvIV]\n" + beopts + " ..." + ) + + adv_usage["variant"] = _("[-Haiv] [-F format] [ ...]") + adv_usage["facet"] = "[-Haim] [-F format] [ ...]" + adv_usage["avoid"] = _("[pkg_fmri_pattern] ...") + adv_usage["unavoid"] = _("[pkg_fmri_pattern] ...") + adv_usage["freeze"] = _("[-n] [-c reason] [pkg_fmri_pattern] ...") + adv_usage["unfreeze"] = _("[-n] [pkg_name_pattern] ...") + adv_usage["set-property"] = _("propname propvalue") + adv_usage["add-property-value"] = _("propname propvalue") + adv_usage["remove-property-value"] = _("propname propvalue") + adv_usage["unset-property"] = _("propname ...") + adv_usage["property"] = _("[-H] [propname ...]") + + adv_usage["set-publisher"] = _( + "[-Pedv] [-k ssl_key] [-c ssl_cert]\n" + " [-O origin_to_set|--origin-uri=origin_to_set ...]\n" + " [-g origin_to_add|--add-origin=origin_to_add ...]\n" + " [-G origin_to_remove|--remove-origin=origin_to_remove ...]\n" + " [-m mirror_to_add|--add-mirror=mirror_to_add ...]\n" + " [-M mirror_to_remove|--remove-mirror=mirror_to_remove ...]\n" + " [-p repo_uri] [--enable] [--disable] [--no-refresh]\n" + + recurseopts + + " [--reset-uuid] [--non-sticky] [--sticky]\n" + " [--search-after=publisher]\n" + " [--search-before=publisher]\n" + " [--search-first]\n" + " [--approve-ca-cert=path_to_CA]\n" + " [--revoke-ca-cert=hash_of_CA_to_revoke]\n" + " [--unset-ca-cert=hash_of_CA_to_unset]\n" + " [--set-property name_of_property=value]\n" + " [--add-property-value name_of_property=value_to_add]\n" + " [--remove-property-value name_of_property=value_to_remove]\n" + " [--unset-property name_of_property_to_delete]\n" + " [--proxy proxy to use]\n" + " [publisher]" + ) + + adv_usage["unset-publisher"] = _("publisher ...") + adv_usage["publisher"] = _("[-HPn] [-F format] [publisher ...]") + adv_usage["history"] = _( + "[-HNl] [-t [time|time-time],...] [-n number] [-o column,...]" + ) + adv_usage["purge-history"] = "" + adv_usage["rebuild-index"] = "" + adv_usage["update-format"] = "" + adv_usage["exact-install"] = _( + "[-nvq] [-C n] [-g path_or_uri ...] [--accept]\n" + " [--licenses] [--no-index] [--no-refresh]\n" + + beopts + + " [--reject pkg_fmri_pattern ... ] pkg_fmri_pattern ..." 
+ ) + adv_usage["dehydrate"] = _("[-nvq] [-p publisher ...]") + adv_usage["rehydrate"] = _("[-nvq] [-p publisher ...]") + adv_usage["flag"] = _("[-mM] [pkg_fmri_pattern ...]") + adv_usage["clean"] = "[-v]" + + priv_usage["remote"] = _("--ctlfd=file_descriptor --progfd=file_descriptor") + priv_usage["list-linked"] = _("-H") + priv_usage["attach-linked"] = _( + "[-fnvq] [-C n] [--accept] [--licenses] [--no-index]\n" + " [--no-refresh] [--no-pkg-updates] [--linked-md-only]\n" + " [--allow-relink]\n" + " [--prop-linked = ...]\n" + " (-c|-p) " + ) + priv_usage["detach-linked"] = _( + "[-fnvq] [-a|-l ] [--no-pkg-updates] [--linked-md-only]" + ) + priv_usage["property-linked"] = _("[-H] [-l ] [propname ...]") + priv_usage["audit-linked"] = _("[-H] [-a|-l ] [--no-parent-sync]") + priv_usage["pubcheck-linked"] = "" + priv_usage["clean-up-hot-fix"] = "" + priv_usage["sync-linked"] = _( + "[-nvq] [-C n] [--accept] [--licenses] [--no-index]\n" + " [--no-refresh] [--no-parent-sync] [--no-pkg-updates]\n" + " [--linked-md-only] [-a|-l ]" + ) + priv_usage["set-property-linked"] = _( + "[-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]\n" + " [--no-parent-sync] [--no-pkg-updates]\n" + " [--linked-md-only] = ..." + ) + priv_usage["copy-publishers-from"] = _("") + priv_usage["list-uuids"] = "" + + def print_cmds(cmd_list, cmd_dic): + for cmd in cmd_list: + if cmd == "": + logger.error("") + else: + if cmd not in cmd_dic: + # this should never happen - callers + # should check for valid subcommands + # before calling usage(..) + raise ValueError( + "Unable to find usage str for " "{0}".format(cmd) + ) + use_txt = cmd_dic[cmd] + if use_txt != "": + logger.error( + " pkg {cmd} " "{use_txt}".format(**locals()) + ) + else: + logger.error(" pkg " "{0}".format(cmd)) - basic_usage["list"] = _( - "[-HafiMmnqRrsuv] [-g path_or_uri ...] [--no-refresh]\n" - " [-F format] [-o column[,column]]\n" - " [pkg_fmri_pattern ...]") - basic_usage["refresh"] = _("[-q] [--full] [publisher ...]") - basic_usage["version"] = "" - - advanced_cmds = [ - "info", - "contents", - "search", - "", - "verify", - "fix", - "revert", - "", - "mediator", - "set-mediator", - "unset-mediator", - "", - "variant", - "change-variant", - "", - "facet", - "change-facet", - "", - "avoid", - "unavoid", - "", - "freeze", - "unfreeze", - "", - "property", - "set-property", - "add-property-value", - "remove-property-value", - "unset-property", - "", - "publisher", - "set-publisher", - "unset-publisher", - "", - "history", - "purge-history", - "", - "rebuild-index", - "update-format", - "image-create", - "exact-install", - "", - "dehydrate", - "rehydrate", - "", - "flag", - "clean", - ] + if not full and cmd: + if cmd not in priv_usage: + logger.error(_("Usage:")) + else: + logger.error( + _( + "Private subcommand usage, options " + "subject to change at any time:" + ) + ) + combined = {} + combined.update(basic_usage) + combined.update(adv_usage) + combined.update(priv_usage) + print_cmds([cmd], combined) + sys.exit(retcode) - adv_usage["info"] = \ - _("[-lqr] [-g path_or_uri ...] [--license] [pkg_fmri_pattern ...]") - adv_usage["contents"] = _( - "[-Hmr] [-a attribute=pattern]... [-g path_or_uri]...\n" - " [-o attribute[,attribute]...]... [-s sort_key]\n" - " [-t action_name[,action_name]...]...\n" - " [pkg_fmri_pattern ...]") - adv_usage["search"] = _( - "[-HIaflpr] [-o attribute ...] [-s repo_uri] query") - - adv_usage["verify"] = _("[-Hqv] [-p path]... 
[--parsable version]\n" - " [--unpackaged] [--unpackaged-only] [pkg_fmri_pattern ...]") - adv_usage["fix"] = _( - "[-nvq]\n" - + beopts + - " [--accept] [--licenses] [--parsable version] [--unpackaged]\n" - " [pkg_fmri_pattern ...]") - adv_usage["revert"] = _( - "[-nv]\n" - + beopts + - " (--tagged tag-name ... | path-to-file ...)") - - adv_usage["image-create"] = _( - "[-FPUfz] [--force] [--full|--partial|--user] [--zone]\n" - " [-k ssl_key] [-c ssl_cert] [--no-refresh]\n" - " [--variant = ...]\n" - " [-g uri|--origin=uri ...] [-m uri|--mirror=uri ...]\n" - " [--facet =(True|False) ...]\n" - " [(-p|--publisher) [=]] dir") - adv_usage["change-variant"] = _( - "[-nvq] [-C n] [-g path_or_uri ...] [--accept]\n" - " [--licenses] [--no-index] [--no-refresh]\n" - + beopts + recurseopts + - " [--sync-actuators | --sync-actuators-timeout timeout]\n" - " [--reject pkg_fmri_pattern ... ]\n" - " = ...") - - adv_usage["change-facet"] = _( - "[-nvq] [-C n] [-g path_or_uri ...] [--accept]\n" - " [--licenses] [--no-index] [--no-refresh]\n" - + beopts + recurseopts + - " [--sync-actuators | --sync-actuators-timeout timeout]\n" - " [--reject pkg_fmri_pattern ... ]\n" - " =[True|False|None] ...") - - adv_usage["mediator"] = _("[-aH] [-F format] [ ...]") - adv_usage["set-mediator"] = _( - "[-nv] [-I ]\n" - " [-V ]\n" - + beopts + - " ...") - adv_usage["unset-mediator"] = _("[-nvIV]\n" - + beopts + - " ...") - - adv_usage["variant"] = _("[-Haiv] [-F format] [ ...]") - adv_usage["facet"] = ("[-Haim] [-F format] [ ...]") - adv_usage["avoid"] = _("[pkg_fmri_pattern] ...") - adv_usage["unavoid"] = _("[pkg_fmri_pattern] ...") - adv_usage["freeze"] = _("[-n] [-c reason] [pkg_fmri_pattern] ...") - adv_usage["unfreeze"] = _("[-n] [pkg_name_pattern] ...") - adv_usage["set-property"] = _("propname propvalue") - adv_usage["add-property-value"] = _("propname propvalue") - adv_usage["remove-property-value"] = _("propname propvalue") - adv_usage["unset-property"] = _("propname ...") - adv_usage["property"] = _("[-H] [propname ...]") - - adv_usage["set-publisher"] = _("[-Pedv] [-k ssl_key] [-c ssl_cert]\n" - " [-O origin_to_set|--origin-uri=origin_to_set ...]\n" - " [-g origin_to_add|--add-origin=origin_to_add ...]\n" - " [-G origin_to_remove|--remove-origin=origin_to_remove ...]\n" - " [-m mirror_to_add|--add-mirror=mirror_to_add ...]\n" - " [-M mirror_to_remove|--remove-mirror=mirror_to_remove ...]\n" - " [-p repo_uri] [--enable] [--disable] [--no-refresh]\n" - + recurseopts + - " [--reset-uuid] [--non-sticky] [--sticky]\n" - " [--search-after=publisher]\n" - " [--search-before=publisher]\n" - " [--search-first]\n" - " [--approve-ca-cert=path_to_CA]\n" - " [--revoke-ca-cert=hash_of_CA_to_revoke]\n" - " [--unset-ca-cert=hash_of_CA_to_unset]\n" - " [--set-property name_of_property=value]\n" - " [--add-property-value name_of_property=value_to_add]\n" - " [--remove-property-value name_of_property=value_to_remove]\n" - " [--unset-property name_of_property_to_delete]\n" - " [--proxy proxy to use]\n" - " [publisher]") - - adv_usage["unset-publisher"] = _("publisher ...") - adv_usage["publisher"] = _("[-HPn] [-F format] [publisher ...]") - adv_usage["history"] = _("[-HNl] [-t [time|time-time],...] [-n number] [-o column,...]") - adv_usage["purge-history"] = "" - adv_usage["rebuild-index"] = "" - adv_usage["update-format"] = "" - adv_usage["exact-install"] = _("[-nvq] [-C n] [-g path_or_uri ...] [--accept]\n" - " [--licenses] [--no-index] [--no-refresh]\n" - + beopts + - " [--reject pkg_fmri_pattern ... 
] pkg_fmri_pattern ...") - adv_usage["dehydrate"] = _("[-nvq] [-p publisher ...]") - adv_usage["rehydrate"] = _("[-nvq] [-p publisher ...]") - adv_usage["flag"] = _("[-mM] [pkg_fmri_pattern ...]") - adv_usage["clean"] = "[-v]" - - priv_usage["remote"] = _( - "--ctlfd=file_descriptor --progfd=file_descriptor") - priv_usage["list-linked"] = _("-H") - priv_usage["attach-linked"] = _( - "[-fnvq] [-C n] [--accept] [--licenses] [--no-index]\n" - " [--no-refresh] [--no-pkg-updates] [--linked-md-only]\n" - " [--allow-relink]\n" - " [--prop-linked = ...]\n" - " (-c|-p) ") - priv_usage["detach-linked"] = _( - "[-fnvq] [-a|-l ] [--no-pkg-updates] [--linked-md-only]") - priv_usage["property-linked"] = _("[-H] [-l ] [propname ...]") - priv_usage["audit-linked"] = _( - "[-H] [-a|-l ] [--no-parent-sync]") - priv_usage["pubcheck-linked"] = "" - priv_usage["clean-up-hot-fix"] = "" - priv_usage["sync-linked"] = _( - "[-nvq] [-C n] [--accept] [--licenses] [--no-index]\n" - " [--no-refresh] [--no-parent-sync] [--no-pkg-updates]\n" - " [--linked-md-only] [-a|-l ]") - priv_usage["set-property-linked"] = _( - "[-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]\n" - " [--no-parent-sync] [--no-pkg-updates]\n" - " [--linked-md-only] = ...") - priv_usage["copy-publishers-from"] = _("") - priv_usage["list-uuids"] = "" - - def print_cmds(cmd_list, cmd_dic): - for cmd in cmd_list: - if cmd == "": - logger.error("") - else: - if cmd not in cmd_dic: - # this should never happen - callers - # should check for valid subcommands - # before calling usage(..) - raise ValueError( - "Unable to find usage str for " - "{0}".format(cmd)) - use_txt = cmd_dic[cmd] - if use_txt != "": - logger.error( - " pkg {cmd} " - "{use_txt}".format(**locals())) - else: - logger.error(" pkg " - "{0}".format(cmd)) - if not full and cmd: - if cmd not in priv_usage: - logger.error(_("Usage:")) - else: - logger.error(_("Private subcommand usage, options " - "subject to change at any time:")) - combined = {} - combined.update(basic_usage) - combined.update(adv_usage) - combined.update(priv_usage) - print_cmds([cmd], combined) - sys.exit(retcode) - - elif not full: - # The full list of subcommands isn't desired. - known_words = ["help"] - known_words.extend(basic_cmds) - known_words.extend(w for w in advanced_cmds if w) - candidates = misc.suggest_known_words(unknown_cmd, known_words) - if candidates: - # Suggest correct subcommands if we can. - words = ", ". join(candidates) - logger.error(_("Did you mean:\n {0}\n").format(words)) - logger.error(_("For a full list of subcommands, run: pkg help")) - sys.exit(retcode) + elif not full: + # The full list of subcommands isn't desired. + known_words = ["help"] + known_words.extend(basic_cmds) + known_words.extend(w for w in advanced_cmds if w) + candidates = misc.suggest_known_words(unknown_cmd, known_words) + if candidates: + # Suggest correct subcommands if we can. + words = ", ".join(candidates) + logger.error(_("Did you mean:\n {0}\n").format(words)) + logger.error(_("For a full list of subcommands, run: pkg help")) + sys.exit(retcode) - if verbose: - # Display a verbose usage message of subcommands. - logger.error(_("""\ + if verbose: + # Display a verbose usage message of subcommands. 
+ logger.error( + _( + """\ Usage: pkg [options] command [cmd_options] [operands] -""")) - logger.error(_("Basic subcommands:")) - print_cmds(basic_cmds, basic_usage) +""" + ) + ) + logger.error(_("Basic subcommands:")) + print_cmds(basic_cmds, basic_usage) - logger.error(_("\nAdvanced subcommands:")) - print_cmds(advanced_cmds, adv_usage) + logger.error(_("\nAdvanced subcommands:")) + print_cmds(advanced_cmds, adv_usage) - logger.error(_(""" + logger.error( + _( + """ Options: -R dir --no-network-cache --help or -? Environment: - PKG_IMAGE""")) - else: - # Display the full list of subcommands. - logger.error(_("""\ -Usage: pkg [options] command [cmd_options] [operands]""")) - logger.error(_("The following commands are supported:")) - logger.error(_(""" + PKG_IMAGE""" + ) + ) + else: + # Display the full list of subcommands. + logger.error( + _( + """\ +Usage: pkg [options] command [cmd_options] [operands]""" + ) + ) + logger.error(_("The following commands are supported:")) + logger.error( + _( + """ Package Information : list search info contents Package Transitions : update install uninstall history exact-install apply-hot-fix @@ -535,5216 +612,6571 @@ def print_cmds(cmd_list, cmd_dic): property set-property add-property-value unset-property remove-property-value Miscellaneous : image-create dehydrate rehydrate clean -For more info, run: pkg help """)) - sys.exit(retcode) +For more info, run: pkg help """ + ) + ) + sys.exit(retcode) + def get_fmri_args(api_inst, pargs, cmd=None): - """ Convenience routine to check that input args are valid fmris. """ + """Convenience routine to check that input args are valid fmris.""" + + res = [] + errors = [] + for pat, err, pfmri, matcher in api_inst.parse_fmri_patterns(pargs): + if not err: + res.append((pat, err, pfmri, matcher)) + continue + if isinstance(err, version.VersionError): + # For version errors, include the pattern so + # that the user understands why it failed. + errors.append("Illegal FMRI '{0}': {1}".format(pat, err)) + else: + # Including the pattern is redundant for other + # exceptions. + errors.append(err) + if errors: + error("\n".join(str(e) for e in errors), cmd=cmd) + return len(errors) == 0, res - res = [] - errors = [] - for pat, err, pfmri, matcher in api_inst.parse_fmri_patterns(pargs): - if not err: - res.append((pat, err, pfmri, matcher)) - continue - if isinstance(err, version.VersionError): - # For version errors, include the pattern so - # that the user understands why it failed. - errors.append("Illegal FMRI '{0}': {1}".format(pat, - err)) - else: - # Including the pattern is redundant for other - # exceptions. 
- errors.append(err) - if errors: - error("\n".join(str(e) for e in errors), cmd=cmd) - return len(errors) == 0, res def calc_fmtstr(attrs, data): - widths = [0] * len(data[0]) + widths = [0] * len(data[0]) - for entry in data: - for i, field in enumerate(attrs): - if len(entry.get(field)) > widths[i]: - widths[i] = len(entry.get(field)) + for entry in data: + for i, field in enumerate(attrs): + if len(entry.get(field)) > widths[i]: + widths[i] = len(entry.get(field)) + + fmt = "" + for i, w in enumerate(widths): + fmt += "{" + f"{i}:{w+1}" + "}" + return fmt - fmt = '' - for i, w in enumerate(widths): - fmt += '{' + f'{i}:{w+1}' + '}' - return fmt def format_output(attrs, fields, data, output_format, fmt_str, omit_headers): - field_data = { a: [all_formats, fields.get(a), ""] for a in attrs } - order = [ fields.get(a) for a in attrs ] - - sys.stdout.write(misc.get_listing( - order, field_data, data, output_format, fmt_str, omit_headers, - False)) - -def list_inventory(op, api_inst, pargs, - li_parent_sync, list_all, list_installed_newest, list_newest, - list_upgradable, omit_headers, output_format, output_fields, origins, quiet, - refresh_catalogs, summary, list_removable, list_all_removable, list_manual, - list_not_manual, list_installable, verbose): - """List packages.""" - - def gen(meta=False): - fields = { - "fmri": { - "header": "FMRI", - "display": lambda x: - f"pkg://{x['pub']}/{x['pkg']}@{short_ver}:{ts}" - }, - "version": { - "header": "VERSION", - "display": lambda x: short_ver - }, - "release": { - "header": "RELEASE", - "display": lambda x: release - }, - "osrelease": { - "header": "OS RELEASE", - "display": lambda x: build_release - }, - "branch": { - "header": "BRANCH", - "display": lambda x: branch - }, - "timestamp": { - "header": "TIMESTAMP", - "display": lambda x: ts - }, - "flags": { - "header": "IFO", - "display": lambda x: status - }, - "name": { - "header": "NAME", - "display": lambda x: x.get('pkg') - }, - "publisher": { - "header": "PUBLISHER", - "display": lambda x: x.get('pub') - }, - "namepub": { - "header": "NAME (PUBLISHER)", - "display": lambda x: - x.get('pkg') + ( - f" ({x['pub']})" if x['pub'] != ppub else '' - ) - }, - "summary": { - "header": "SUMMARY", - "display": lambda x: x.get('summary') - }, - } - - if meta: - yield {k: fields[k].get('header') - for k in fields.keys()} - return - - if not "data" in out_json: - return - data = out_json["data"] - - ppub = api_inst.get_highest_ranked_publisher() - if ppub: - ppub = ppub.prefix - - state_map = [ - [ - ("installed", "i") - ], - [ - ("frozen", "f"), - ("optional", "S"), - ("manual", "m") - ], - [ - ("obsolete", "o"), - ("renamed", "r"), - ("legacy", "l") - ], - ] + field_data = {a: [all_formats, fields.get(a), ""] for a in attrs} + order = [fields.get(a) for a in attrs] - if quiet: - return - - for entry in data: - if (list_removable and not list_all_removable and - 'optional' in entry['states']): - continue - - if list_manual and 'manual' not in entry['states']: - continue - - if list_not_manual and 'manual' in entry['states']: - continue - - if (list_installable and - not set(['installed', 'obsolete', 'renamed']) - .isdisjoint(entry['states'])): - continue - - status = "" - for sentry in state_map: - for s, v in sentry: - if s in entry["states"]: - st = v - break - else: - st = "-" - status += st - - # Use class method instead of creating an object for - # performance reasons. 
- (release, build_release, branch, ts), \ - short_ver = version.Version.split(entry['version']) - - yield { a: fields[a].get('display')(entry) - for a in attrs - } - - fields = list(gen(True))[0] - - accumulate = False - fmt_str = "{0}" - if output_fields: - if verbose or summary: - usage("-o cannot be used with -v or -s") - attrs = list(dict.fromkeys(output_fields.lower().split(","))) - unknown = [a for a in attrs if not fields.get(a)] - if len(unknown): - usage( - "Unknown output field(s): {}\n".format( - ', '.join(unknown)) + - "Known fields are: {}".format( - ', '.join(sorted(fields.keys())))) - if len(attrs) == 0: - usage("No fields specified. Known fields are: {}" - .format(', '.join(sorted(fields.keys())))) - # If there is more than one field selected, we need to - # accumulate all results in order to determine the column - # widths. - if len(attrs) > 1: - accumulate = True - elif verbose: - fmt_str = "{0:76} {1}" - attrs = ["fmri", "flags"] - elif summary: - fmt_str = "{0:55} {1}" - attrs = ["namepub", "summary"] - else: - fmt_str = "{0:49} {1:26} {2}" - attrs = ["namepub", "version", "flags"] - - # getting json output. - out_json = client_api._list_inventory(op, api_inst, pargs, - li_parent_sync, list_all, list_installed_newest, list_newest, - list_upgradable, origins, quiet, refresh_catalogs, - list_removable=list_removable) - - errors = None - if "errors" in out_json: - errors = out_json["errors"] - errors = _generate_error_messages(out_json["status"], errors, - selected_type=["catalog_refresh", "catalog_refresh_failed"]) - - if accumulate: - data = list(gen()) - fmt_str = calc_fmtstr(attrs, data) - else: - data = list(gen()) + sys.stdout.write( + misc.get_listing( + order, field_data, data, output_format, fmt_str, omit_headers, False + ) + ) - if len(data) == 0: - omit_headers = True - format_output(attrs, fields, data, output_format, fmt_str, omit_headers) +def list_inventory( + op, + api_inst, + pargs, + li_parent_sync, + list_all, + list_installed_newest, + list_newest, + list_upgradable, + omit_headers, + output_format, + output_fields, + origins, + quiet, + refresh_catalogs, + summary, + list_removable, + list_all_removable, + list_manual, + list_not_manual, + list_installable, + verbose, +): + """List packages.""" + + def gen(meta=False): + fields = { + "fmri": { + "header": "FMRI", + "display": lambda x: f"pkg://{x['pub']}/{x['pkg']}@{short_ver}:{ts}", + }, + "version": {"header": "VERSION", "display": lambda x: short_ver}, + "release": {"header": "RELEASE", "display": lambda x: release}, + "osrelease": { + "header": "OS RELEASE", + "display": lambda x: build_release, + }, + "branch": {"header": "BRANCH", "display": lambda x: branch}, + "timestamp": {"header": "TIMESTAMP", "display": lambda x: ts}, + "flags": {"header": "IFO", "display": lambda x: status}, + "name": {"header": "NAME", "display": lambda x: x.get("pkg")}, + "publisher": { + "header": "PUBLISHER", + "display": lambda x: x.get("pub"), + }, + "namepub": { + "header": "NAME (PUBLISHER)", + "display": lambda x: x.get("pkg") + + (f" ({x['pub']})" if x["pub"] != ppub else ""), + }, + "summary": { + "header": "SUMMARY", + "display": lambda x: x.get("summary"), + }, + } - # Print errors left. 
- if errors: - _generate_error_messages(out_json["status"], errors) + if meta: + yield {k: fields[k].get("header") for k in fields.keys()} + return + + if not "data" in out_json: + return + data = out_json["data"] + + ppub = api_inst.get_highest_ranked_publisher() + if ppub: + ppub = ppub.prefix + + state_map = [ + [("installed", "i")], + [("frozen", "f"), ("optional", "S"), ("manual", "m")], + [("obsolete", "o"), ("renamed", "r"), ("legacy", "l")], + ] + + if quiet: + return + + for entry in data: + if ( + list_removable + and not list_all_removable + and "optional" in entry["states"] + ): + continue + + if list_manual and "manual" not in entry["states"]: + continue + + if list_not_manual and "manual" in entry["states"]: + continue + + if list_installable and not set( + ["installed", "obsolete", "renamed"] + ).isdisjoint(entry["states"]): + continue + + status = "" + for sentry in state_map: + for s, v in sentry: + if s in entry["states"]: + st = v + break + else: + st = "-" + status += st + + # Use class method instead of creating an object for + # performance reasons. + ( + release, + build_release, + branch, + ts, + ), short_ver = version.Version.split(entry["version"]) + + yield {a: fields[a].get("display")(entry) for a in attrs} + + fields = list(gen(True))[0] + + accumulate = False + fmt_str = "{0}" + if output_fields: + if verbose or summary: + usage("-o cannot be used with -v or -s") + attrs = list(dict.fromkeys(output_fields.lower().split(","))) + unknown = [a for a in attrs if not fields.get(a)] + if len(unknown): + usage( + "Unknown output field(s): {}\n".format(", ".join(unknown)) + + "Known fields are: {}".format( + ", ".join(sorted(fields.keys())) + ) + ) + if len(attrs) == 0: + usage( + "No fields specified. Known fields are: {}".format( + ", ".join(sorted(fields.keys())) + ) + ) + # If there is more than one field selected, we need to + # accumulate all results in order to determine the column + # widths. + if len(attrs) > 1: + accumulate = True + elif verbose: + fmt_str = "{0:76} {1}" + attrs = ["fmri", "flags"] + elif summary: + fmt_str = "{0:55} {1}" + attrs = ["namepub", "summary"] + else: + fmt_str = "{0:49} {1:26} {2}" + attrs = ["namepub", "version", "flags"] + + # getting json output. + out_json = client_api._list_inventory( + op, + api_inst, + pargs, + li_parent_sync, + list_all, + list_installed_newest, + list_newest, + list_upgradable, + origins, + quiet, + refresh_catalogs, + list_removable=list_removable, + ) + + errors = None + if "errors" in out_json: + errors = out_json["errors"] + errors = _generate_error_messages( + out_json["status"], + errors, + selected_type=["catalog_refresh", "catalog_refresh_failed"], + ) + + if accumulate: + data = list(gen()) + fmt_str = calc_fmtstr(attrs, data) + else: + data = list(gen()) + + if len(data) == 0: + omit_headers = True + + format_output(attrs, fields, data, output_format, fmt_str, omit_headers) + + # Print errors left. + if errors: + _generate_error_messages(out_json["status"], errors) + + return out_json["status"] - return out_json["status"] def get_tracker(): - if global_settings.client_output_parsable_version is not None: - progresstracker = progress.NullProgressTracker() - elif global_settings.client_output_quiet: - progresstracker = progress.QuietProgressTracker() - elif global_settings.client_output_progfd: - # This logic handles linked images: for linked children - # we elide the progress output. 
- output_file = os.fdopen(global_settings.client_output_progfd, - "w") - child_tracker = progress.LinkedChildProgressTracker( - output_file=output_file) - dot_tracker = progress.DotProgressTracker( - output_file=output_file) - progresstracker = progress.MultiProgressTracker( - [child_tracker, dot_tracker]) - else: - try: - progresstracker = progress.FancyUNIXProgressTracker() - except progress.ProgressTrackerException: - progresstracker = progress.CommandLineProgressTracker() + if global_settings.client_output_parsable_version is not None: + progresstracker = progress.NullProgressTracker() + elif global_settings.client_output_quiet: + progresstracker = progress.QuietProgressTracker() + elif global_settings.client_output_progfd: + # This logic handles linked images: for linked children + # we elide the progress output. + output_file = os.fdopen(global_settings.client_output_progfd, "w") + child_tracker = progress.LinkedChildProgressTracker( + output_file=output_file + ) + dot_tracker = progress.DotProgressTracker(output_file=output_file) + progresstracker = progress.MultiProgressTracker( + [child_tracker, dot_tracker] + ) + else: + try: + progresstracker = progress.FancyUNIXProgressTracker() + except progress.ProgressTrackerException: + progresstracker = progress.CommandLineProgressTracker() - return progresstracker + return progresstracker -def accept_plan_licenses(api_inst): - """Helper function that marks all licenses for the current plan as - accepted if they require acceptance.""" - plan = api_inst.describe() - for pfmri, src, dest, accepted, displayed in plan.get_licenses(): - if not dest.must_accept: - continue - api_inst.set_plan_license_status(pfmri, dest.license, - accepted=True) +def accept_plan_licenses(api_inst): + """Helper function that marks all licenses for the current plan as + accepted if they require acceptance.""" + + plan = api_inst.describe() + for pfmri, src, dest, accepted, displayed in plan.get_licenses(): + if not dest.must_accept: + continue + api_inst.set_plan_license_status(pfmri, dest.license, accepted=True) + + +display_plan_options = [ + "basic", + "fmris", + "variants/facets", + "services", + "actions", + "boot-archive", +] -display_plan_options = ["basic", "fmris", "variants/facets", "services", - "actions", "boot-archive"] def __display_plan(api_inst, verbose, noexecute, op=None): - """Helper function to display plan to the desired degree. - Verbose can either be a numerical value, or a list of - items to display""" - - if isinstance(verbose, int): - disp = ["basic"] - - if verbose == 0 and noexecute: - disp.append("release-notes") - if verbose > 0: - disp.extend(["fmris", "mediators", "services", - "variants/facets", "boot-archive", - "release-notes", "editable", "actuators"]) - if verbose > 1: - disp.append("actions") - if verbose > 2: - disp.append("solver-errors") - else: - disp = verbose + """Helper function to display plan to the desired degree. 
+ Verbose can either be a numerical value, or a list of + items to display""" + + if isinstance(verbose, int): + disp = ["basic"] + + if verbose == 0 and noexecute: + disp.append("release-notes") + if verbose > 0: + disp.extend( + [ + "fmris", + "mediators", + "services", + "variants/facets", + "boot-archive", + "release-notes", + "editable", + "actuators", + ] + ) + if verbose > 1: + disp.append("actions") + if verbose > 2: + disp.append("solver-errors") + else: + disp = verbose - if DebugValues["plan"] and "solver-errors" not in disp: - disp.append("solver-errors") + if DebugValues["plan"] and "solver-errors" not in disp: + disp.append("solver-errors") - plan = api_inst.describe() + plan = api_inst.describe() - if not plan: - return + if not plan: + return - if plan.must_display_notes(): - disp.append("release-notes") + if plan.must_display_notes(): + disp.append("release-notes") - if api_inst.is_liveroot and not api_inst.is_active_liveroot_be: - # Warn the user since this isn't likely what they wanted. - if plan.new_be: - logger.warning(_("""\ + if api_inst.is_liveroot and not api_inst.is_active_liveroot_be: + # Warn the user since this isn't likely what they wanted. + if plan.new_be: + logger.warning( + _( + """\ ****************************************************************************** WARNING: The boot environment being modified is not the active one. Changes made in the active BE will not be reflected on the next boot. ****************************************************************************** -""")) - else: - logger.warning(_("""\ +""" + ) + ) + else: + logger.warning( + _( + """\ ****************************************************************************** WARNING: The boot environment being modified is not the active one. Changes made will not be reflected on the next boot. ****************************************************************************** -""")) - - # a = change(!!!) (due to fix or mediator/variant/facet) - # c = update - # r = remove - # i = install - a, r, i, c = [], [], [], [] - for src, dest in plan.get_changes(): - if dest is None: - r.append((src, dest)) - elif src is None: - i.append((src, dest)) - elif src != dest: - c.append((src, dest)) - else: - # Changing or repairing package content (e.g. fix, - # change-facet, etc.) - a.append((dest, dest)) - - def bool_str(val): - if val: - return _("Yes") - return _("No") - - status = [] - varcets = plan.get_varcets() - mediators = plan.get_mediators() - if "basic" in disp: - def cond_show(s1, s2, v): - if v: - status.append((s1, s2.format(v))) - - cond_show(_("Packages to remove:"), "{0:d}", len(r)) - cond_show(_("Packages to install:"), "{0:d}", len(i)) - cond_show(_("Packages to update:"), "{0:d}", len(c)) - if varcets or mediators: - cond_show(_("Packages to change:"), "{0:d}", len(a)) - else: - cond_show(_("Packages to fix:"), "{0:d}", len(a)) - cond_show(_("Mediators to change:"), "{0:d}", len(mediators)) - cond_show(_("Variants/Facets to change:"), "{0:d}", - len(varcets)) - if not plan.new_be: - cond_show(_("Services to change:"), "{0:d}", - len(plan.services)) - - if verbose: - # Only show space information in verbose mode. - abytes = plan.bytes_added - if abytes: - status.append((_("Estimated space available:"), - misc.bytes_to_str(plan.bytes_avail))) - status.append(( - _("Estimated space to be consumed:"), - misc.bytes_to_str(plan.bytes_added))) - - # only display BE information if we're operating on the - # liveroot environment (since otherwise we'll never be - # manipulating BEs). 
- if api_inst.is_liveroot: - status.append((_("Create boot environment:"), - bool_str(plan.new_be))) - - if plan.new_be: - status.append((_("Activate boot environment:"), - 'Next boot only' - if plan.activate_be == 'bootnext' - else bool_str(plan.activate_be))) - # plan.be_name can be undefined in the uninstall case - # so test it before trying to print it out. - if plan.be_name: - status.append((_("Name of boot environment:"), - plan.be_name)) - - status.append((_("Create backup boot environment:"), - bool_str(plan.backup_be))) - - if "boot-archive" in disp: - status.append((_("Rebuild boot archive:"), - bool_str(plan.update_boot_archive))) - - # Right-justify all status strings based on length of longest string. - if status: - rjust_status = max(len(s[0]) for s in status) - rjust_value = max(len(s[1]) for s in status) - for s in status: - logger.info("{0} {1}".format(s[0].rjust(rjust_status), - s[1].rjust(rjust_value))) - - # Display list of removed packages in the default output. - # Verbose output already has this in a different form. - if not verbose and r and op in [PKG_OP_INSTALL, PKG_OP_UPDATE]: - logger.info(_("\nRemoved Packages:\n")) - removals = [src.pkg_stem for src, dest in r] - if len(r) <= 5: - logger.info(" " + "\n ".join(removals)) - else: - logger.info(" " + "\n ".join(removals[:5])) - logger.info(" ...") - logger.info(" {0:d} additional removed package(s). " - "Use 'pkg history' to view " - "the full list.".format(len(r) - 5)) +""" + ) + ) + + # a = change(!!!) (due to fix or mediator/variant/facet) + # c = update + # r = remove + # i = install + a, r, i, c = [], [], [], [] + for src, dest in plan.get_changes(): + if dest is None: + r.append((src, dest)) + elif src is None: + i.append((src, dest)) + elif src != dest: + c.append((src, dest)) + else: + # Changing or repairing package content (e.g. fix, + # change-facet, etc.) + a.append((dest, dest)) + + def bool_str(val): + if val: + return _("Yes") + return _("No") + + status = [] + varcets = plan.get_varcets() + mediators = plan.get_mediators() + if "basic" in disp: + + def cond_show(s1, s2, v): + if v: + status.append((s1, s2.format(v))) + + cond_show(_("Packages to remove:"), "{0:d}", len(r)) + cond_show(_("Packages to install:"), "{0:d}", len(i)) + cond_show(_("Packages to update:"), "{0:d}", len(c)) + if varcets or mediators: + cond_show(_("Packages to change:"), "{0:d}", len(a)) + else: + cond_show(_("Packages to fix:"), "{0:d}", len(a)) + cond_show(_("Mediators to change:"), "{0:d}", len(mediators)) + cond_show(_("Variants/Facets to change:"), "{0:d}", len(varcets)) + if not plan.new_be: + cond_show(_("Services to change:"), "{0:d}", len(plan.services)) + if verbose: + # Only show space information in verbose mode. + abytes = plan.bytes_added + if abytes: + status.append( + ( + _("Estimated space available:"), + misc.bytes_to_str(plan.bytes_avail), + ) + ) + status.append( + ( + _("Estimated space to be consumed:"), + misc.bytes_to_str(plan.bytes_added), + ) + ) + + # only display BE information if we're operating on the + # liveroot environment (since otherwise we'll never be + # manipulating BEs). + if api_inst.is_liveroot: + status.append( + (_("Create boot environment:"), bool_str(plan.new_be)) + ) + + if plan.new_be: + status.append( + ( + _("Activate boot environment:"), + "Next boot only" + if plan.activate_be == "bootnext" + else bool_str(plan.activate_be), + ) + ) + # plan.be_name can be undefined in the uninstall case + # so test it before trying to print it out. 
+ if plan.be_name: + status.append((_("Name of boot environment:"), plan.be_name)) + + status.append( + (_("Create backup boot environment:"), bool_str(plan.backup_be)) + ) + + if "boot-archive" in disp: + status.append( + (_("Rebuild boot archive:"), bool_str(plan.update_boot_archive)) + ) + + # Right-justify all status strings based on length of longest string. + if status: + rjust_status = max(len(s[0]) for s in status) + rjust_value = max(len(s[1]) for s in status) + for s in status: + logger.info( + "{0} {1}".format( + s[0].rjust(rjust_status), s[1].rjust(rjust_value) + ) + ) + + # Display list of removed packages in the default output. + # Verbose output already has this in a different form. + if not verbose and r and op in [PKG_OP_INSTALL, PKG_OP_UPDATE]: + logger.info(_("\nRemoved Packages:\n")) + removals = [src.pkg_stem for src, dest in r] + if len(r) <= 5: + logger.info(" " + "\n ".join(removals)) + else: + logger.info(" " + "\n ".join(removals[:5])) + logger.info(" ...") + logger.info( + " {0:d} additional removed package(s). " + "Use 'pkg history' to view " + "the full list.".format(len(r) - 5) + ) + + need_blank = True + if "mediators" in disp and mediators: + if need_blank: + logger.info("") + + logger.info(_("Changed mediators:")) + for x in mediators: + logger.info(" {0}".format(x)) + # output has trailing blank + need_blank = False + + if "variants/facets" in disp and varcets: + if need_blank: + logger.info("") need_blank = True - if "mediators" in disp and mediators: - if need_blank: - logger.info("") - logger.info(_("Changed mediators:")) - for x in mediators: - logger.info(" {0}".format(x)) - # output has trailing blank - need_blank = False + logger.info(_("Changed variants/facets:")) + for x in varcets: + logger.info(" {0}".format(x)) - if "variants/facets" in disp and varcets: + if "solver-errors" in disp: + first = True + for l in plan.get_solver_errors(): + if first: if need_blank: - logger.info("") + logger.info("") need_blank = True + logger.info(_("Solver dependency errors:")) + first = False + logger.info(l) + + if "fmris" in disp: + changed = collections.defaultdict(list) + for src, dest in itertools.chain(r, i, c, a): + if src and dest: + if src.publisher != dest.publisher: + pparent = "{0} -> {1}".format(src.publisher, dest.publisher) + else: + pparent = dest.publisher + pname = dest.pkg_stem + + # Only display timestamp if version is same and + # timestamp is not between the two fmris. 
+ sver = src.fmri.version + dver = dest.fmri.version + ssver = sver.get_short_version() + dsver = dver.get_short_version() + include_ts = ssver == dsver and sver.timestr != dver.timestr + if include_ts: + pver = sver.get_version(include_build=False) + else: + pver = ssver + + if src != dest: + if include_ts: + pver += " -> {0}".format( + dver.get_version(include_build=False) + ) + else: + pver += " -> {0}".format(dsver) + + elif dest: + pparent = dest.publisher + pname = dest.pkg_stem + pver = "None -> {0}".format( + dest.fmri.version.get_short_version() + ) + else: + pparent = src.publisher + pname = src.pkg_stem + pver = "{0} -> None".format( + src.fmri.version.get_short_version() + ) - logger.info(_("Changed variants/facets:")) - for x in varcets: - logger.info(" {0}".format(x)) - - if "solver-errors" in disp: - first = True - for l in plan.get_solver_errors(): - if first: - if need_blank: - logger.info("") - need_blank = True - logger.info(_("Solver dependency errors:")) - first = False - logger.info(l) - - if "fmris" in disp: - changed = collections.defaultdict(list) - for src, dest in itertools.chain(r, i, c, a): - if src and dest: - if src.publisher != dest.publisher: - pparent = "{0} -> {1}".format( - src.publisher, dest.publisher) - else: - pparent = dest.publisher - pname = dest.pkg_stem - - # Only display timestamp if version is same and - # timestamp is not between the two fmris. - sver = src.fmri.version - dver = dest.fmri.version - ssver = sver.get_short_version() - dsver = dver.get_short_version() - include_ts = (ssver == dsver and - sver.timestr != dver.timestr) - if include_ts: - pver = sver.get_version( - include_build=False) - else: - pver = ssver - - if src != dest: - if include_ts: - pver += " -> {0}".format( - dver.get_version( - include_build=False)) - else: - pver += " -> {0}".format(dsver) - - elif dest: - pparent = dest.publisher - pname = dest.pkg_stem - pver = "None -> {0}".format( - dest.fmri.version.get_short_version()) - else: - pparent = src.publisher - pname = src.pkg_stem - pver = "{0} -> None".format( - src.fmri.version.get_short_version()) - - changed[pparent].append((pname, pver)) - - if changed: - if need_blank: - logger.info("") - need_blank = True - - logger.info(_("Changed packages:")) - last_parent = None - for pparent, pname, pver in ( - (pparent, pname, pver) - for pparent in sorted(changed) - for pname, pver in changed[pparent] - ): - if pparent != last_parent: - logger.info(pparent) - - logger.info(" {0}".format(pname)) - logger.info(" {0}".format(pver)) - last_parent = pparent - - if "actuators" in disp: - # print pkg which have been altered due to pkg actuators - # e.g: - # - # Package-triggered Operations: - # TriggerPackage - # update - # PackageA - # PackageB - # uninstall - # PackageC - - first = True - for trigger_pkg, act_dict in plan.gen_pkg_actuators(): - if first: - first = False - if need_blank: - logger.info("") - need_blank = True - logger.info( - _("Package-triggered Operations:")) - logger.info(trigger_pkg) - for exec_op in sorted(act_dict): - logger.info(" {0}".format(exec_op)) - for pkg in sorted(act_dict[exec_op]): - logger.info( - " {0}".format(pkg)) - - if "services" in disp and not plan.new_be: - last_action = None - for action, smf_fmri in plan.services: - if last_action is None: - if need_blank: - logger.info("") - need_blank = True - logger.info(_("Services:")) - if action != last_action: - logger.info(" {0}:".format(action)) - logger.info(" {0}".format(smf_fmri)) - last_action = action - - # Displaying editable file 
list is redundant for pkg fix. - if "editable" in disp and op != PKG_OP_FIX: - moved, removed, installed, updated = plan.get_editable_changes() - - cfg_change_fmt = " {0}" - cfg_changes = [] - first = True - - def add_cfg_changes(changes, chg_hdr, chg_fmt=cfg_change_fmt): - first = True - for chg in changes: - if first: - cfg_changes.append(" {0}".format( - chg_hdr)) - first = False - cfg_changes.append(chg_fmt.format(*chg)) - - add_cfg_changes((entry for entry in moved), - _("Move:"), chg_fmt=" {0} -> {1}") - - add_cfg_changes(((src,) for (src, dest) in removed), - _("Remove:")) - - add_cfg_changes(((dest,) for (src, dest) in installed), - _("Install:")) - - add_cfg_changes(((dest,) for (src, dest) in updated), - _("Update:")) - - if cfg_changes: - if need_blank: - logger.info("") - need_blank = True - logger.info(_("Editable files to change:")) - for l in cfg_changes: - logger.info(l) - - if "actions" in disp: - if need_blank: + changed[pparent].append((pname, pver)) + + if changed: + if need_blank: + logger.info("") + need_blank = True + + logger.info(_("Changed packages:")) + last_parent = None + for pparent, pname, pver in ( + (pparent, pname, pver) + for pparent in sorted(changed) + for pname, pver in changed[pparent] + ): + if pparent != last_parent: + logger.info(pparent) + + logger.info(" {0}".format(pname)) + logger.info(" {0}".format(pver)) + last_parent = pparent + + if "actuators" in disp: + # print pkg which have been altered due to pkg actuators + # e.g: + # + # Package-triggered Operations: + # TriggerPackage + # update + # PackageA + # PackageB + # uninstall + # PackageC + + first = True + for trigger_pkg, act_dict in plan.gen_pkg_actuators(): + if first: + first = False + if need_blank: logger.info("") - need_blank = True - - logger.info(_("Actions:")) - for a in plan.get_actions(): - logger.info(" {0}".format(a)) - - seen = False - for (o, n) in plan.get_elided_actions(): - if not seen: - logger.info("") - logger.info(_( - "Elided due to image exclusions:")) - seen = True - - logger.info(f' {o} -> {n}') - - if plan.has_release_notes(): + need_blank = True + logger.info(_("Package-triggered Operations:")) + logger.info(trigger_pkg) + for exec_op in sorted(act_dict): + logger.info(" {0}".format(exec_op)) + for pkg in sorted(act_dict[exec_op]): + logger.info(" {0}".format(pkg)) + + if "services" in disp and not plan.new_be: + last_action = None + for action, smf_fmri in plan.services: + if last_action is None: if need_blank: - logger.info("") + logger.info("") need_blank = True + logger.info(_("Services:")) + if action != last_action: + logger.info(" {0}:".format(action)) + logger.info(" {0}".format(smf_fmri)) + last_action = action + + # Displaying editable file list is redundant for pkg fix. 
+ if "editable" in disp and op != PKG_OP_FIX: + moved, removed, installed, updated = plan.get_editable_changes() + + cfg_change_fmt = " {0}" + cfg_changes = [] + first = True + + def add_cfg_changes(changes, chg_hdr, chg_fmt=cfg_change_fmt): + first = True + for chg in changes: + if first: + cfg_changes.append(" {0}".format(chg_hdr)) + first = False + cfg_changes.append(chg_fmt.format(*chg)) + + add_cfg_changes( + (entry for entry in moved), _("Move:"), chg_fmt=" {0} -> {1}" + ) - if "release-notes" in disp: - logger.info(_("Release Notes:")) - for a in plan.get_release_notes(): - logger.info(" %s", a) - else: - if not plan.new_be and api_inst.is_liveroot and not DebugValues["GenerateNotesFile"]: - logger.info(_("Release notes can be viewed with 'pkg history -n 1 -N'")) - else: - tmp_path = __write_tmp_release_notes(plan) - if tmp_path: - logger.info(_("Release notes can be found in {0} before " - "rebooting.").format(tmp_path)) - logger.info(_("After rebooting, use 'pkg history -n 1 -N' to view release notes.")) - -def __write_tmp_release_notes(plan): - """try to write release notes out to a file in /tmp and return the name""" - if plan.has_release_notes: - try: - fd, path = tempfile.mkstemp(suffix=".txt", prefix="release-notes") - # make file world readable - os.chmod(path, 0o644) - tmpfile = os.fdopen(fd, "w+") - for a in plan.get_release_notes(): - a = misc.force_str(a) - print(a, file=tmpfile) - tmpfile.close() - return path - except Exception: - pass + add_cfg_changes(((src,) for (src, dest) in removed), _("Remove:")) -def __display_parsable_plan(api_inst, parsable_version, child_images=None): - """Display the parsable version of the plan.""" - - assert parsable_version == 0, "parsable_version was {0!r}".format( - parsable_version) - plan = api_inst.describe() - # Set the default values. - added_fmris = [] - removed_fmris = [] - changed_fmris = [] - affected_fmris = [] - backup_be_created = False - new_be_created = False - backup_be_name = None - be_name = None - boot_archive_rebuilt = False - be_activated = True - space_available = None - space_required = None - facets_changed = [] - variants_changed = [] - services_affected = [] - mediators_changed = [] - editables_changed = [] - pkg_actuators = {} - item_messages = {} - licenses = [] - if child_images is None: - child_images = [] - release_notes = [] - - if plan: - for rem, add in plan.get_changes(): - assert rem is not None or add is not None - if rem is not None and add is not None: - # Lists of lists are used here becuase json will - # convert lists of tuples into lists of lists - # anyway. 
- if rem.fmri == add.fmri: - affected_fmris.append(str(rem)) - else: - changed_fmris.append( - [str(rem), str(add)]) - elif rem is not None: - removed_fmris.append(str(rem)) - else: - added_fmris.append(str(add)) - variants_changed, facets_changed = plan.varcets - backup_be_created = plan.backup_be - new_be_created = plan.new_be - backup_be_name = plan.backup_be_name - be_name = plan.be_name - boot_archive_rebuilt = plan.update_boot_archive - be_activated = plan.activate_be - space_available = plan.bytes_avail - space_required = plan.bytes_added - services_affected = plan.services - mediators_changed = plan.mediators - pkg_actuators = [(p, a) for (p, a) in plan.gen_pkg_actuators()] - - emoved, eremoved, einstalled, eupdated = \ - plan.get_editable_changes() - - # Lists of lists are used here to ensure a consistent ordering - # and because tuples will be convereted to lists anyway; a - # dictionary would be more logical for the top level entries, - # but would make testing more difficult and this is a small, - # known set anyway. - emoved = [[e for e in entry] for entry in emoved] - eremoved = [src for (src, dest) in eremoved] - einstalled = [dest for (src, dest) in einstalled] - eupdated = [dest for (src, dest) in eupdated] - if emoved: - editables_changed.append(["moved", emoved]) - if eremoved: - editables_changed.append(["removed", eremoved]) - if einstalled: - editables_changed.append(["installed", einstalled]) - if eupdated: - editables_changed.append(["updated", eupdated]) - - for n in plan.get_release_notes(): - release_notes.append(n) - - for dfmri, src_li, dest_li, acc, disp in \ - plan.get_licenses(): - src_tup = () - if src_li: - src_tup = (str(src_li.fmri), src_li.license, - src_li.get_text(), src_li.must_accept, - src_li.must_display) - dest_tup = () - if dest_li: - dest_tup = (str(dest_li.fmri), dest_li.license, - dest_li.get_text(), dest_li.must_accept, - dest_li.must_display) - licenses.append( - (str(dfmri), src_tup, dest_tup)) - api_inst.set_plan_license_status(dfmri, dest_li.license, - displayed=True) - item_messages = plan.get_parsable_item_messages() - - ret = { - "activate-be": be_activated, - "add-packages": sorted(added_fmris), - "affect-packages": sorted(affected_fmris), - "affect-services": sorted(services_affected), - "backup-be-name": backup_be_name, - "be-name": be_name, - "boot-archive-rebuild": boot_archive_rebuilt, - "change-facets": sorted(facets_changed), - "change-editables": editables_changed, - "change-mediators": sorted(mediators_changed), - "change-packages": sorted(changed_fmris), - "change-variants": sorted(variants_changed), - "child-images": child_images, - "create-backup-be": backup_be_created, - "create-new-be": new_be_created, - "image-name": None, - "item-messages": item_messages, - "licenses": sorted(licenses, key=lambda x: (x[0], x[1], x[2])), - "release-notes": release_notes, - "remove-packages": sorted(removed_fmris), - "space-available": space_available, - "space-required": space_required, - "version": parsable_version, - } + add_cfg_changes(((dest,) for (src, dest) in installed), _("Install:")) - if pkg_actuators: - ret["package-actuators"] = pkg_actuators + add_cfg_changes(((dest,) for (src, dest) in updated), _("Update:")) - # The image name for the parent image is always None. If this image is - # a child image, then the image name will be set when the parent image - # processes this dictionary. 
- logger.info(json.dumps(ret)) + if cfg_changes: + if need_blank: + logger.info("") + need_blank = True + logger.info(_("Editable files to change:")) + for l in cfg_changes: + logger.info(l) -def display_plan_licenses(api_inst, show_all=False, show_req=True): - """Helper function to display licenses for the current plan. + if "actions" in disp: + if need_blank: + logger.info("") + need_blank = True - 'show_all' is an optional boolean value indicating whether all licenses - should be displayed or only those that have must-display=true.""" + logger.info(_("Actions:")) + for a in plan.get_actions(): + logger.info(" {0}".format(a)) - plan = api_inst.describe() - for pfmri, src, dest, accepted, displayed in plan.get_licenses(): - if not show_all and not dest.must_display: - continue + seen = False + for o, n in plan.get_elided_actions(): + if not seen: + logger.info("") + logger.info(_("Elided due to image exclusions:")) + seen = True - if not show_all and dest.must_display and displayed: - # License already displayed, so doesn't need to be - # displayed again. - continue + logger.info(f" {o} -> {n}") - lic = dest.license - if show_req: - logger.info("-" * 60) - logger.info(_("Package: {0}").format(pfmri.get_fmri( - include_build=False))) - logger.info(_("License: {0}\n").format(lic)) - logger.info(dest.get_text()) - logger.info("\n") + if plan.has_release_notes(): + if need_blank: + logger.info("") + need_blank = True - # Mark license as having been displayed. - api_inst.set_plan_license_status(pfmri, lic, displayed=True) + if "release-notes" in disp: + logger.info(_("Release Notes:")) + for a in plan.get_release_notes(): + logger.info(" %s", a) + else: + if ( + not plan.new_be + and api_inst.is_liveroot + and not DebugValues["GenerateNotesFile"] + ): + logger.info( + _("Release notes can be viewed with 'pkg history -n 1 -N'") + ) + else: + tmp_path = __write_tmp_release_notes(plan) + if tmp_path: + logger.info( + _( + "Release notes can be found in {0} before " + "rebooting." + ).format(tmp_path) + ) + logger.info( + _( + "After rebooting, use 'pkg history -n 1 -N' to view release notes." + ) + ) -def display_plan(api_inst, child_image_plans, noexecute, omit_headers, op, - parsable_version, quiet, quiet_plan, show_licenses, stage, verbose): - """Display plan function.""" - plan = api_inst.describe() - if not plan: - return +def __write_tmp_release_notes(plan): + """try to write release notes out to a file in /tmp and return the name""" + if plan.has_release_notes: + try: + fd, path = tempfile.mkstemp(suffix=".txt", prefix="release-notes") + # make file world readable + os.chmod(path, 0o644) + tmpfile = os.fdopen(fd, "w+") + for a in plan.get_release_notes(): + a = misc.force_str(a) + print(a, file=tmpfile) + tmpfile.close() + return path + except Exception: + pass - if stage not in [API_STAGE_DEFAULT, API_STAGE_PLAN] and not quiet_plan: - # we should have displayed licenses earlier so mark all - # licenses as having been displayed. - display_plan_licenses(api_inst, show_req=False) - return - if not quiet and parsable_version is None and \ - api_inst.planned_nothingtodo(li_ignore_all=True) and not quiet_plan: - # nothing todo - if op == PKG_OP_UPDATE: - s = _("No updates available for this image.") +def __display_parsable_plan(api_inst, parsable_version, child_images=None): + """Display the parsable version of the plan.""" + + assert parsable_version == 0, "parsable_version was {0!r}".format( + parsable_version + ) + plan = api_inst.describe() + # Set the default values. 
+    added_fmris = []
+    removed_fmris = []
+    changed_fmris = []
+    affected_fmris = []
+    backup_be_created = False
+    new_be_created = False
+    backup_be_name = None
+    be_name = None
+    boot_archive_rebuilt = False
+    be_activated = True
+    space_available = None
+    space_required = None
+    facets_changed = []
+    variants_changed = []
+    services_affected = []
+    mediators_changed = []
+    editables_changed = []
+    pkg_actuators = {}
+    item_messages = {}
+    licenses = []
+    if child_images is None:
+        child_images = []
+    release_notes = []
+
+    if plan:
+        for rem, add in plan.get_changes():
+            assert rem is not None or add is not None
+            if rem is not None and add is not None:
+                # Lists of lists are used here because json will
+                # convert lists of tuples into lists of lists
+                # anyway.
+                if rem.fmri == add.fmri:
+                    affected_fmris.append(str(rem))
                else:
-                        s = _("No updates necessary for this image.")
-                if api_inst.ischild():
-                        s += " ({0})".format(api_inst.get_linked_name())
-                msg(s)
+                    changed_fmris.append([str(rem), str(add)])
+            elif rem is not None:
+                removed_fmris.append(str(rem))
+            else:
+                added_fmris.append(str(add))
+        variants_changed, facets_changed = plan.varcets
+        backup_be_created = plan.backup_be
+        new_be_created = plan.new_be
+        backup_be_name = plan.backup_be_name
+        be_name = plan.be_name
+        boot_archive_rebuilt = plan.update_boot_archive
+        be_activated = plan.activate_be
+        space_available = plan.bytes_avail
+        space_required = plan.bytes_added
+        services_affected = plan.services
+        mediators_changed = plan.mediators
+        pkg_actuators = [(p, a) for (p, a) in plan.gen_pkg_actuators()]
+
+        emoved, eremoved, einstalled, eupdated = plan.get_editable_changes()
+
+        # Lists of lists are used here to ensure a consistent ordering
+        # and because tuples will be converted to lists anyway; a
+        # dictionary would be more logical for the top level entries,
+        # but would make testing more difficult and this is a small,
+        # known set anyway.
+ emoved = [[e for e in entry] for entry in emoved] + eremoved = [src for (src, dest) in eremoved] + einstalled = [dest for (src, dest) in einstalled] + eupdated = [dest for (src, dest) in eupdated] + if emoved: + editables_changed.append(["moved", emoved]) + if eremoved: + editables_changed.append(["removed", eremoved]) + if einstalled: + editables_changed.append(["installed", einstalled]) + if eupdated: + editables_changed.append(["updated", eupdated]) + + for n in plan.get_release_notes(): + release_notes.append(n) + + for dfmri, src_li, dest_li, acc, disp in plan.get_licenses(): + src_tup = () + if src_li: + src_tup = ( + str(src_li.fmri), + src_li.license, + src_li.get_text(), + src_li.must_accept, + src_li.must_display, + ) + dest_tup = () + if dest_li: + dest_tup = ( + str(dest_li.fmri), + dest_li.license, + dest_li.get_text(), + dest_li.must_accept, + dest_li.must_display, + ) + licenses.append((str(dfmri), src_tup, dest_tup)) + api_inst.set_plan_license_status( + dfmri, dest_li.license, displayed=True + ) + item_messages = plan.get_parsable_item_messages() + + ret = { + "activate-be": be_activated, + "add-packages": sorted(added_fmris), + "affect-packages": sorted(affected_fmris), + "affect-services": sorted(services_affected), + "backup-be-name": backup_be_name, + "be-name": be_name, + "boot-archive-rebuild": boot_archive_rebuilt, + "change-facets": sorted(facets_changed), + "change-editables": editables_changed, + "change-mediators": sorted(mediators_changed), + "change-packages": sorted(changed_fmris), + "change-variants": sorted(variants_changed), + "child-images": child_images, + "create-backup-be": backup_be_created, + "create-new-be": new_be_created, + "image-name": None, + "item-messages": item_messages, + "licenses": sorted(licenses, key=lambda x: (x[0], x[1], x[2])), + "release-notes": release_notes, + "remove-packages": sorted(removed_fmris), + "space-available": space_available, + "space-required": space_required, + "version": parsable_version, + } + + if pkg_actuators: + ret["package-actuators"] = pkg_actuators + + # The image name for the parent image is always None. If this image is + # a child image, then the image name will be set when the parent image + # processes this dictionary. + logger.info(json.dumps(ret)) - if op != PKG_OP_FIX or not verbose: - # Even nothingtodo, but need to continue to display INFO - # message if verbose is True. - return - if parsable_version is None and not quiet_plan: - display_plan_licenses(api_inst, show_all=show_licenses) +def display_plan_licenses(api_inst, show_all=False, show_req=True): + """Helper function to display licenses for the current plan. + + 'show_all' is an optional boolean value indicating whether all licenses + should be displayed or only those that have must-display=true.""" + + plan = api_inst.describe() + for pfmri, src, dest, accepted, displayed in plan.get_licenses(): + if not show_all and not dest.must_display: + continue + + if not show_all and dest.must_display and displayed: + # License already displayed, so doesn't need to be + # displayed again. + continue + + lic = dest.license + if show_req: + logger.info("-" * 60) + logger.info( + _("Package: {0}").format(pfmri.get_fmri(include_build=False)) + ) + logger.info(_("License: {0}\n").format(lic)) + logger.info(dest.get_text()) + logger.info("\n") + + # Mark license as having been displayed. 
+ api_inst.set_plan_license_status(pfmri, lic, displayed=True) + + +def display_plan( + api_inst, + child_image_plans, + noexecute, + omit_headers, + op, + parsable_version, + quiet, + quiet_plan, + show_licenses, + stage, + verbose, +): + """Display plan function.""" + + plan = api_inst.describe() + if not plan: + return + + if stage not in [API_STAGE_DEFAULT, API_STAGE_PLAN] and not quiet_plan: + # we should have displayed licenses earlier so mark all + # licenses as having been displayed. + display_plan_licenses(api_inst, show_req=False) + return + + if ( + not quiet + and parsable_version is None + and api_inst.planned_nothingtodo(li_ignore_all=True) + and not quiet_plan + ): + # nothing todo + if op == PKG_OP_UPDATE: + s = _("No updates available for this image.") + else: + s = _("No updates necessary for this image.") + if api_inst.ischild(): + s += " ({0})".format(api_inst.get_linked_name()) + msg(s) + + if op != PKG_OP_FIX or not verbose: + # Even nothingtodo, but need to continue to display INFO + # message if verbose is True. + return - if not quiet and not quiet_plan: - __display_plan(api_inst, verbose, noexecute, op=op) + if parsable_version is None and not quiet_plan: + display_plan_licenses(api_inst, show_all=show_licenses) + + if not quiet and not quiet_plan: + __display_plan(api_inst, verbose, noexecute, op=op) + + if parsable_version is not None: + __display_parsable_plan(api_inst, parsable_version, child_image_plans) + elif not quiet: + if not quiet_plan: + # Ensure a blank line is inserted before the message + # output. + msg() + + last_item_id = None + for ( + item_id, + parent_id, + msg_time, + msg_level, + msg_type, + msg_text, + ) in plan.gen_item_messages(ordered=True): + ntd = api_inst.planned_nothingtodo(li_ignore_all=True) + if last_item_id is None or last_item_id != item_id: + last_item_id = item_id + if op == PKG_OP_FIX and not noexecute and msg_type == MSG_ERROR: + if ntd: + msg(_("Could not repair: {0:50}").format(item_id)) + else: + msg(_("Repairing: {0:50}").format(item_id)) + + if op == PKG_OP_FIX: + if not verbose and msg_type == MSG_INFO: + # If verbose is False, don't display + # any INFO messages. + continue - if parsable_version is not None: - __display_parsable_plan(api_inst, parsable_version, - child_image_plans) - elif not quiet: - if not quiet_plan: - # Ensure a blank line is inserted before the message - # output. - msg() - - last_item_id = None - for item_id, parent_id, msg_time, msg_level, msg_type, \ - msg_text in plan.gen_item_messages(ordered=True): - ntd = api_inst.planned_nothingtodo(li_ignore_all=True) - if last_item_id is None or last_item_id != item_id: - last_item_id = item_id - if op == PKG_OP_FIX and not noexecute and \ - msg_type == MSG_ERROR: - if ntd: - msg(_("Could not repair: {0:50}" - ).format(item_id)) - else: - msg(_("Repairing: {0:50}" - ).format(item_id)) - - if op == PKG_OP_FIX: - if not verbose and msg_type == MSG_INFO: - # If verbose is False, don't display - # any INFO messages. 
- continue - - if not omit_headers: - omit_headers = True - msg(_("{pkg_name:70} {result:>7}").format( - pkg_name=_("PACKAGE"), - result=_("STATUS"))) - - msg(msg_text) - -def __print_verify_result(op, api_inst, plan, noexecute, omit_headers, - verbose, print_packaged=True): - did_print_something = False - if print_packaged: - last_item_id = None - ntd = api_inst.planned_nothingtodo(li_ignore_all=True) - for item_id, parent_id, msg_time, msg_level, msg_type, \ - msg_text in plan.gen_item_messages(ordered=True): - if msg_type == MSG_UNPACKAGED: - continue - if parent_id is None and last_item_id != item_id: - if op == PKG_OP_FIX and not noexecute and \ - msg_level == MSG_ERROR: - if ntd: - msg(_("Could not repair: {0:50}" - ).format(item_id)) - else: - msg(_("Repairing: {0:50}" - ).format(item_id)) - - if op in [PKG_OP_FIX, PKG_OP_VERIFY]: - if not verbose and msg_level == MSG_INFO: - # If verbose is False, don't display - # any INFO messages. - continue - - if not omit_headers: - omit_headers = True - msg(_("{pkg_name:70} {result:>7}" - ).format(pkg_name=_("PACKAGE"), - result=_("STATUS"))) - - # Top level message. - if not parent_id: - msg(msg_text) - elif last_item_id != item_id: - # A new action id; we need to print it out and - # then group its subsequent messages. - msg(_("\t{0}").format(item_id)) - if msg_text: - msg(_("\t\t{0}").format(msg_text)) - else: - if msg_text: - msg(_("\t\t{0}").format(msg_text)) - last_item_id = item_id - did_print_something = True - else: if not omit_headers: - msg(_("UNPACKAGED CONTENTS")) - - # Print warning messages at the beginning. - for item_id, parent_id, msg_time, msg_level, msg_type, \ - msg_text in plan.gen_item_messages(): - if msg_type != MSG_UNPACKAGED: - continue - if msg_level == MSG_WARNING: - msg(_("WARNING: {0}").format(msg_text)) - # Print the rest of messages. 
- for item_id, parent_id, msg_time, msg_level, msg_type, \ - msg_text in plan.gen_item_messages(ordered=True): - if msg_type != MSG_UNPACKAGED: - continue - if msg_level == MSG_INFO: - msg(_("{0}:\n\t{1}").format( - item_id, msg_text)) - elif msg_level == MSG_ERROR: - msg(_("ERROR: {0}").format(msg_text)) - did_print_something = True - return did_print_something - -def display_plan_cb(api_inst, child_image_plans=None, noexecute=False, - omit_headers=False, op=None, parsable_version=None, quiet=False, - quiet_plan=False, show_licenses=False, stage=None, verbose=None, - unpackaged=False, unpackaged_only=False, plan_only=False): - """Callback function for displaying plan.""" - - if plan_only: - __display_plan(api_inst, verbose, noexecute) - return + omit_headers = True + msg( + _("{pkg_name:70} {result:>7}").format( + pkg_name=_("PACKAGE"), result=_("STATUS") + ) + ) + + msg(msg_text) + + +def __print_verify_result( + op, api_inst, plan, noexecute, omit_headers, verbose, print_packaged=True +): + did_print_something = False + if print_packaged: + last_item_id = None + ntd = api_inst.planned_nothingtodo(li_ignore_all=True) + for ( + item_id, + parent_id, + msg_time, + msg_level, + msg_type, + msg_text, + ) in plan.gen_item_messages(ordered=True): + if msg_type == MSG_UNPACKAGED: + continue + if parent_id is None and last_item_id != item_id: + if ( + op == PKG_OP_FIX + and not noexecute + and msg_level == MSG_ERROR + ): + if ntd: + msg(_("Could not repair: {0:50}").format(item_id)) + else: + msg(_("Repairing: {0:50}").format(item_id)) - plan = api_inst.describe() - if not plan: - return + if op in [PKG_OP_FIX, PKG_OP_VERIFY]: + if not verbose and msg_level == MSG_INFO: + # If verbose is False, don't display + # any INFO messages. + continue - if stage not in [API_STAGE_DEFAULT, API_STAGE_PLAN] and not quiet_plan: - # we should have displayed licenses earlier so mark all - # licenses as having been displayed. - display_plan_licenses(api_inst, show_req=False) - return + if not omit_headers: + omit_headers = True + msg( + _("{pkg_name:70} {result:>7}").format( + pkg_name=_("PACKAGE"), result=_("STATUS") + ) + ) + + # Top level message. + if not parent_id: + msg(msg_text) + elif last_item_id != item_id: + # A new action id; we need to print it out and + # then group its subsequent messages. + msg(_("\t{0}").format(item_id)) + if msg_text: + msg(_("\t\t{0}").format(msg_text)) + else: + if msg_text: + msg(_("\t\t{0}").format(msg_text)) + last_item_id = item_id + did_print_something = True + else: + if not omit_headers: + msg(_("UNPACKAGED CONTENTS")) + + # Print warning messages at the beginning. + for ( + item_id, + parent_id, + msg_time, + msg_level, + msg_type, + msg_text, + ) in plan.gen_item_messages(): + if msg_type != MSG_UNPACKAGED: + continue + if msg_level == MSG_WARNING: + msg(_("WARNING: {0}").format(msg_text)) + # Print the rest of messages. 
+ for ( + item_id, + parent_id, + msg_time, + msg_level, + msg_type, + msg_text, + ) in plan.gen_item_messages(ordered=True): + if msg_type != MSG_UNPACKAGED: + continue + if msg_level == MSG_INFO: + msg(_("{0}:\n\t{1}").format(item_id, msg_text)) + elif msg_level == MSG_ERROR: + msg(_("ERROR: {0}").format(msg_text)) + did_print_something = True + return did_print_something + + +def display_plan_cb( + api_inst, + child_image_plans=None, + noexecute=False, + omit_headers=False, + op=None, + parsable_version=None, + quiet=False, + quiet_plan=False, + show_licenses=False, + stage=None, + verbose=None, + unpackaged=False, + unpackaged_only=False, + plan_only=False, +): + """Callback function for displaying plan.""" + + if plan_only: + __display_plan(api_inst, verbose, noexecute) + return + + plan = api_inst.describe() + if not plan: + return + + if stage not in [API_STAGE_DEFAULT, API_STAGE_PLAN] and not quiet_plan: + # we should have displayed licenses earlier so mark all + # licenses as having been displayed. + display_plan_licenses(api_inst, show_req=False) + return + + if ( + not quiet + and parsable_version is None + and api_inst.planned_nothingtodo(li_ignore_all=True) + and not quiet_plan + ): + # nothing todo + if op == PKG_OP_UPDATE: + s = _("No updates available for this image.") + else: + s = _("No updates necessary for this image.") + if api_inst.ischild(): + s += " ({0})".format(api_inst.get_linked_name()) + msg(s) + + if op not in [PKG_OP_FIX, PKG_OP_VERIFY] or not verbose: + # Even nothingtodo, but need to continue to display INFO + # message if verbose is True. + return - if not quiet and parsable_version is None and \ - api_inst.planned_nothingtodo(li_ignore_all=True) and not quiet_plan: - # nothing todo - if op == PKG_OP_UPDATE: - s = _("No updates available for this image.") - else: - s = _("No updates necessary for this image.") - if api_inst.ischild(): - s += " ({0})".format(api_inst.get_linked_name()) - msg(s) + if parsable_version is None and not quiet_plan: + display_plan_licenses(api_inst, show_all=show_licenses) - if op not in [PKG_OP_FIX, PKG_OP_VERIFY] or not verbose: - # Even nothingtodo, but need to continue to display INFO - # message if verbose is True. - return + if not quiet and not quiet_plan: + __display_plan(api_inst, verbose, noexecute, op=op) - if parsable_version is None and not quiet_plan: - display_plan_licenses(api_inst, show_all=show_licenses) + if parsable_version is not None: + parsable_plan = plan.get_parsable_plan( + parsable_version, child_image_plans, api_inst=api_inst + ) + logger.info(json.dumps(parsable_plan)) + elif not quiet: + if not quiet_plan: + # Ensure a blank line is inserted before the message + # output. + msg() + + # Message print for package verification result. + if not unpackaged_only: + did_print = __print_verify_result( + op, api_inst, plan, noexecute, omit_headers, verbose + ) + + # Print an extra line to separate output between + # packaged and unpackaged content. 
+ if ( + did_print + and unpackaged + and any( + entry[4] == MSG_UNPACKAGED + for entry in plan.gen_item_messages() + ) + ): + msg("".join(["-"] * 80)) - if not quiet and not quiet_plan: - __display_plan(api_inst, verbose, noexecute, op=op) + if unpackaged or unpackaged_only: + __print_verify_result( + op, + api_inst, + plan, + noexecute, + omit_headers, + verbose, + print_packaged=False, + ) - if parsable_version is not None: - parsable_plan = plan.get_parsable_plan(parsable_version, - child_image_plans, api_inst=api_inst) - logger.info(json.dumps(parsable_plan)) - elif not quiet: - if not quiet_plan: - # Ensure a blank line is inserted before the message - # output. - msg() - - # Message print for package verification result. - if not unpackaged_only: - did_print = __print_verify_result(op, api_inst, plan, - noexecute, omit_headers, verbose) - - # Print an extra line to separate output between - # packaged and unpackaged content. - if did_print and unpackaged and any(entry[4] == - MSG_UNPACKAGED for entry in - plan.gen_item_messages()): - msg("".join(["-"] * 80)) - - if unpackaged or unpackaged_only: - __print_verify_result(op, api_inst, plan, noexecute, - omit_headers, verbose, print_packaged=False) def __display_plan_messages(api_inst, stages=None): - """Print out any messages generated during the specified - stages.""" - if not isinstance(stages, frozenset): - stages = frozenset([stages]) - plan = api_inst.describe() - if not plan: - return - for item_id, parent_id, msg_time, msg_level, msg_type, msg_text in \ - plan.gen_item_messages(ordered=True, stages=stages): - if msg_level == MSG_INFO: - msg("\n" + _("{0}").format(msg_text)) - elif msg_level == MSG_WARNING: - emsg("\n" + _("WARNING: {0}").format(msg_text)) - else: - emsg("\n" + _("ERROR: {0}").format(msg_text)) + """Print out any messages generated during the specified + stages.""" + if not isinstance(stages, frozenset): + stages = frozenset([stages]) + plan = api_inst.describe() + if not plan: + return + for ( + item_id, + parent_id, + msg_time, + msg_level, + msg_type, + msg_text, + ) in plan.gen_item_messages(ordered=True, stages=stages): + if msg_level == MSG_INFO: + msg("\n" + _("{0}").format(msg_text)) + elif msg_level == MSG_WARNING: + emsg("\n" + _("WARNING: {0}").format(msg_text)) + else: + emsg("\n" + _("ERROR: {0}").format(msg_text)) + def __api_prepare_plan(operation, api_inst): - """Prepare plan.""" + """Prepare plan.""" + + # Exceptions which happen here are printed in the above level, with + # or without some extra decoration done here. + # XXX would be nice to kick the progress tracker. + try: + api_inst.prepare() + except (api_errors.PermissionsException, api_errors.UnknownErrors) as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + error("\n" + str(e)) + return EXIT_OOPS + except api_errors.TransportError as e: + # move past the progress tracker line. + msg("\n") + raise e + except api_errors.PlanLicenseErrors as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + logger.error("\n") + error( + _( + "The following packages require their " + "licenses to be accepted before they can be installed " + "or updated: " + ) + ) + logger.error(str(e)) + logger.error( + _( + "To indicate that you agree to and accept the " + "terms of the licenses of the packages listed above, " + "use the --accept option. To display all of the related " + "licenses, use the --licenses option." 
+ ) + ) + return EXIT_LICENSE + except api_errors.InvalidPlanError as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + error("\n" + str(e)) + return EXIT_OOPS + except api_errors.ImageFormatUpdateNeeded as e: + format_update_error(e) + return EXIT_OOPS + except api_errors.ImageInsufficentSpace as e: + error(str(e)) + return EXIT_OOPS + except KeyboardInterrupt: + raise + except: + error( + _( + "\nAn unexpected error happened while preparing for " "{0}:" + ).format(operation) + ) + raise + finally: + __display_plan_messages(api_inst, OP_STAGE_PREP) + return EXIT_OK - # Exceptions which happen here are printed in the above level, with - # or without some extra decoration done here. - # XXX would be nice to kick the progress tracker. - try: - api_inst.prepare() - except (api_errors.PermissionsException, api_errors.UnknownErrors) as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - error("\n" + str(e)) - return EXIT_OOPS - except api_errors.TransportError as e: - # move past the progress tracker line. - msg("\n") - raise e - except api_errors.PlanLicenseErrors as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - logger.error("\n") - error(_("The following packages require their " - "licenses to be accepted before they can be installed " - "or updated: ")) - logger.error(str(e)) - logger.error(_("To indicate that you agree to and accept the " - "terms of the licenses of the packages listed above, " - "use the --accept option. To display all of the related " - "licenses, use the --licenses option.")) - return EXIT_LICENSE - except api_errors.InvalidPlanError as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - error("\n" + str(e)) - return EXIT_OOPS - except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - return EXIT_OOPS - except api_errors.ImageInsufficentSpace as e: - error(str(e)) - return EXIT_OOPS - except KeyboardInterrupt: - raise - except: - error(_("\nAn unexpected error happened while preparing for " - "{0}:").format(operation)) - raise - finally: - __display_plan_messages(api_inst, OP_STAGE_PREP) - return EXIT_OK def __api_execute_plan(operation, api_inst): - """Execute plan.""" + """Execute plan.""" + + rval = None + try: + api_inst.execute_plan() + pd = api_inst.describe() + if pd.actuator_timed_out: + rval = EXIT_ACTUATOR + else: + rval = EXIT_OK + except RuntimeError as e: + error(_("{operation} failed: {err}").format(operation=operation, err=e)) + rval = EXIT_OOPS + except ( + api_errors.InvalidPlanError, + api_errors.ActionExecutionError, + api_errors.InvalidPackageErrors, + api_errors.PlanExclusionError, + ) as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + error("\n" + str(e)) + rval = EXIT_OOPS + except api_errors.LinkedImageException as e: + error( + _( + "{operation} failed (linked image exception(s)):\n" "{err}" + ).format(operation=operation, err=e) + ) + rval = e.lix_exitrv + except api_errors.ImageUpdateOnLiveImageException: + error(_("{0} cannot be done on live image").format(operation)) + rval = EXIT_NOTLIVE + except api_errors.RebootNeededOnLiveImageException: + error( + _( + 'Requested "{0}" operation would affect files that ' + "cannot be modified in live image.\n" + "Please retry this operation on an alternate boot " + "environment." 
+ ).format(operation) + ) + rval = EXIT_NOTLIVE + except api_errors.CorruptedIndexException as e: + error( + "The search index appears corrupted. Please rebuild the " + "index with 'pkg rebuild-index'." + ) + rval = EXIT_OOPS + except api_errors.ProblematicPermissionsIndexException as e: + error(str(e)) + error( + _( + "\n(Failure to consistently execute pkg commands as a " + "privileged user is often a source of this problem.)" + ) + ) + rval = EXIT_OOPS + except (api_errors.PermissionsException, api_errors.UnknownErrors) as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + error("\n" + str(e)) + rval = EXIT_OOPS + except api_errors.ImageFormatUpdateNeeded as e: + format_update_error(e) + rval = EXIT_OOPS + except api_errors.BEException as e: + error(e) + rval = EXIT_OOPS + except api_errors.WrapSuccessfulIndexingException: + raise + except api_errors.ImageInsufficentSpace as e: + error(str(e)) + rval = EXIT_OOPS + except Exception as e: + error( + _( + "An unexpected error happened during " "{operation}: {err}" + ).format(operation=operation, err=e) + ) + raise + finally: + exc_type = exc_value = exc_tb = None + if rval is None: + # Store original exception so that the real cause of + # failure can be raised if this fails. + exc_type, exc_value, exc_tb = sys.exc_info() + + __display_plan_messages(api_inst, OP_STAGE_EXEC) - rval = None try: - api_inst.execute_plan() - pd = api_inst.describe() - if pd.actuator_timed_out: - rval = EXIT_ACTUATOR - else: - rval = EXIT_OK - except RuntimeError as e: - error(_("{operation} failed: {err}").format( - operation=operation, err=e)) - rval = EXIT_OOPS - except (api_errors.InvalidPlanError, - api_errors.ActionExecutionError, - api_errors.InvalidPackageErrors, - api_errors.PlanExclusionError) as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - error("\n" + str(e)) - rval = EXIT_OOPS - except (api_errors.LinkedImageException) as e: - error(_("{operation} failed (linked image exception(s)):\n" - "{err}").format(operation=operation, err=e)) - rval = e.lix_exitrv - except api_errors.ImageUpdateOnLiveImageException: - error(_("{0} cannot be done on live image").format(operation)) - rval = EXIT_NOTLIVE - except api_errors.RebootNeededOnLiveImageException: - error(_("Requested \"{0}\" operation would affect files that " - "cannot be modified in live image.\n" - "Please retry this operation on an alternate boot " - "environment.").format(operation)) - rval = EXIT_NOTLIVE - except api_errors.CorruptedIndexException as e: - error("The search index appears corrupted. Please rebuild the " - "index with 'pkg rebuild-index'.") - rval = EXIT_OOPS - except api_errors.ProblematicPermissionsIndexException as e: - error(str(e)) - error(_("\n(Failure to consistently execute pkg commands as a " - "privileged user is often a source of this problem.)")) - rval = EXIT_OOPS - except (api_errors.PermissionsException, api_errors.UnknownErrors) as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. 
- error("\n" + str(e)) - rval = EXIT_OOPS - except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - rval = EXIT_OOPS - except api_errors.BEException as e: - error(e) - rval = EXIT_OOPS - except api_errors.WrapSuccessfulIndexingException: - raise - except api_errors.ImageInsufficentSpace as e: - error(str(e)) - rval = EXIT_OOPS - except Exception as e: - error(_("An unexpected error happened during " - "{operation}: {err}").format( - operation=operation, err=e)) + salvaged = api_inst.describe().salvaged + newbe = api_inst.describe().new_be + if salvaged and (rval == EXIT_OK or not newbe): + # Only show salvaged file list if populated + # and operation was successful, or if operation + # failed and a new BE was not created for + # the operation. + logger.error("") + logger.error( + _( + "The following unexpected or " + "editable files and directories were\n" + "salvaged while executing the requested " + "package operation; they\nhave been moved " + "to the displayed location in the image:\n" + ) + ) + for opath, spath in salvaged: + logger.error(" {0} -> {1}".format(opath, spath)) + except Exception: + if rval is not None: + # Only raise exception encountered here if the + # exception previously raised was suppressed. raise - finally: - exc_type = exc_value = exc_tb = None - if rval is None: - # Store original exception so that the real cause of - # failure can be raised if this fails. - exc_type, exc_value, exc_tb = sys.exc_info() - __display_plan_messages(api_inst, OP_STAGE_EXEC) + if exc_value or exc_tb: + if six.PY2: + six.reraise(exc_value, None, exc_tb) + else: + raise exc_value - try: - salvaged = api_inst.describe().salvaged - newbe = api_inst.describe().new_be - if salvaged and (rval == EXIT_OK or not newbe): - # Only show salvaged file list if populated - # and operation was successful, or if operation - # failed and a new BE was not created for - # the operation. - logger.error("") - logger.error(_("The following unexpected or " - "editable files and directories were\n" - "salvaged while executing the requested " - "package operation; they\nhave been moved " - "to the displayed location in the image:\n")) - for opath, spath in salvaged: - logger.error(" {0} -> {1}".format( - opath, spath)) - except Exception: - if rval is not None: - # Only raise exception encountered here if the - # exception previously raised was suppressed. 
- raise - - if exc_value or exc_tb: - if six.PY2: - six.reraise(exc_value, None, exc_tb) - else: - raise exc_value + return rval - return rval def __api_alloc(imgdir, exact_match, pkg_image_used): - """Allocate API instance.""" + """Allocate API instance.""" + + progresstracker = get_tracker() + try: + return api.ImageInterface( + imgdir, + CLIENT_API_VERSION, + progresstracker, + None, + PKG_CLIENT_NAME, + exact_match=exact_match, + ) + except api_errors.ImageNotFoundException as e: + if e.user_specified: + if pkg_image_used: + error( + _("No image rooted at '{0}' " "(set by $PKG_IMAGE)").format( + e.user_dir + ) + ) + else: + error(_("No image rooted at '{0}'").format(e.user_dir)) + else: + error(_("No image found.")) + return + except api_errors.PermissionsException as e: + error(e) + return + except api_errors.ImageFormatUpdateNeeded as e: + format_update_error(e) + return - progresstracker = get_tracker() - try: - return api.ImageInterface(imgdir, CLIENT_API_VERSION, - progresstracker, None, PKG_CLIENT_NAME, - exact_match=exact_match) - except api_errors.ImageNotFoundException as e: - if e.user_specified: - if pkg_image_used: - error(_("No image rooted at '{0}' " - "(set by $PKG_IMAGE)").format(e.user_dir)) - else: - error(_("No image rooted at '{0}'").format( - e.user_dir)) - else: - error(_("No image found.")) - return - except api_errors.PermissionsException as e: - error(e) - return - except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - return def __api_plan_exception(op, noexecute, verbose, api_inst): - """Handle plan exception.""" - - e_type, e, e_traceback = sys.exc_info() - - if e_type == api_errors.ImageNotFoundException: - error(_("No image rooted at '{0}'").format(e.user_dir), cmd=op) - return EXIT_OOPS - if e_type == api_errors.InventoryException: - error("\n" + _("{operation} failed (inventory exception):\n" - "{err}").format(operation=op, err=e)) - return EXIT_OOPS - if isinstance(e, api_errors.LinkedImageException): - error(_("{operation} failed (linked image exception(s)):\n" - "{err}").format(operation=op, err=e)) - return e.lix_exitrv - if e_type == api_errors.IpkgOutOfDateException: - msg(_("""\ + """Handle plan exception.""" + + e_type, e, e_traceback = sys.exc_info() + + if e_type == api_errors.ImageNotFoundException: + error(_("No image rooted at '{0}'").format(e.user_dir), cmd=op) + return EXIT_OOPS + if e_type == api_errors.InventoryException: + error( + "\n" + + _("{operation} failed (inventory exception):\n" "{err}").format( + operation=op, err=e + ) + ) + return EXIT_OOPS + if isinstance(e, api_errors.LinkedImageException): + error( + _( + "{operation} failed (linked image exception(s)):\n" "{err}" + ).format(operation=op, err=e) + ) + return e.lix_exitrv + if e_type == api_errors.IpkgOutOfDateException: + msg( + _( + """\ WARNING: pkg(7) appears to be out of date, and should be updated before running {op}. 
Please update pkg(7) by executing 'pkg install pkg:/package/pkg' as a privileged user and then retry the {op}.""" - ).format(**locals())) - return EXIT_OOPS - if e_type == api_errors.NonLeafPackageException: - error("\n" + str(e), cmd=op) - return EXIT_OOPS - if e_type == api_errors.CatalogRefreshException: - display_catalog_failures(e) - return EXIT_OOPS - if e_type == api_errors.ConflictingActionErrors or \ - e_type == api_errors.ImageBoundaryErrors: - if verbose: - __display_plan(api_inst, verbose, noexecute) - error("\n" + str(e), cmd=op) - return EXIT_OOPS - if e_type in (api_errors.InvalidPlanError, - api_errors.ReadOnlyFileSystemException, - api_errors.ActionExecutionError, - api_errors.InvalidPackageErrors, - api_errors.PlanExclusionError): - error("\n" + str(e), cmd=op) - return EXIT_OOPS - if e_type == api_errors.ImageFormatUpdateNeeded: - format_update_error(e) - return EXIT_OOPS - - if e_type == api_errors.ImageUpdateOnLiveImageException: - error("\n" + _("The proposed operation cannot be performed on " - "a live image."), cmd=op) - return EXIT_NOTLIVE - - if issubclass(e_type, api_errors.BEException): - error("\n" + str(e), cmd=op) - return EXIT_OOPS - - if e_type == api_errors.PlanCreationException: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - txt = str(e) - if e.multiple_matches: - txt += "\n\n" + _("Please provide one of the package " - "FMRIs listed above to the install command.") - error("\n" + txt, cmd=op) - if verbose: - logger.error("\n".join(e.verbose_info)) - if e.invalid_mediations: - # Bad user input for mediation. - return EXIT_BADOPT - return EXIT_OOPS - - if isinstance(e, (api_errors.CertificateError, + ).format(**locals()) + ) + return EXIT_OOPS + if e_type == api_errors.NonLeafPackageException: + error("\n" + str(e), cmd=op) + return EXIT_OOPS + if e_type == api_errors.CatalogRefreshException: + display_catalog_failures(e) + return EXIT_OOPS + if ( + e_type == api_errors.ConflictingActionErrors + or e_type == api_errors.ImageBoundaryErrors + ): + if verbose: + __display_plan(api_inst, verbose, noexecute) + error("\n" + str(e), cmd=op) + return EXIT_OOPS + if e_type in ( + api_errors.InvalidPlanError, + api_errors.ReadOnlyFileSystemException, + api_errors.ActionExecutionError, + api_errors.InvalidPackageErrors, + api_errors.PlanExclusionError, + ): + error("\n" + str(e), cmd=op) + return EXIT_OOPS + if e_type == api_errors.ImageFormatUpdateNeeded: + format_update_error(e) + return EXIT_OOPS + + if e_type == api_errors.ImageUpdateOnLiveImageException: + error( + "\n" + + _( + "The proposed operation cannot be performed on " "a live image." + ), + cmd=op, + ) + return EXIT_NOTLIVE + + if issubclass(e_type, api_errors.BEException): + error("\n" + str(e), cmd=op) + return EXIT_OOPS + + if e_type == api_errors.PlanCreationException: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + txt = str(e) + if e.multiple_matches: + txt += "\n\n" + _( + "Please provide one of the package " + "FMRIs listed above to the install command." + ) + error("\n" + txt, cmd=op) + if verbose: + logger.error("\n".join(e.verbose_info)) + if e.invalid_mediations: + # Bad user input for mediation. 
+ return EXIT_BADOPT + return EXIT_OOPS + + if isinstance( + e, + ( + api_errors.CertificateError, api_errors.UnknownErrors, api_errors.PermissionsException, api_errors.InvalidPropertyValue, - api_errors.InvalidResourceLocation)): - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - error("\n" + str(e), cmd=op) - return EXIT_OOPS - if e_type == fmri.IllegalFmri: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - error("\n" + str(e), cmd=op) - return EXIT_OOPS - if isinstance(e, api_errors.SigningException): - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - error("\n" + str(e), cmd=op) - return EXIT_OOPS - if isinstance(e, (api_errors.UnsupportedVariantGlobbing, - api_errors.InvalidVarcetNames, api_errors.UnsupportedFacetChange)): - error(str(e), cmd=op) - return EXIT_OOPS - - # if we didn't deal with the exception above, pass it on. - raise - # NOTREACHED - -def __api_plan(_op, _api_inst, _accept=False, _li_ignore=None, _noexecute=False, - _omit_headers=False, _origins=None, _parsable_version=None, _quiet=False, - _quiet_plan=False, _review_release_notes=False, _show_licenses=False, - _stage=API_STAGE_DEFAULT, _verbose=0, **kwargs): - """API plan invocation entry.""" - - # All the api interface functions that we invoke have some - # common arguments. Set those up now. - if _op not in (PKG_OP_REVERT, PKG_OP_FIX, PKG_OP_DEHYDRATE, - PKG_OP_REHYDRATE): - kwargs["li_ignore"] = _li_ignore - kwargs["noexecute"] = _noexecute - if _origins: - kwargs["repos"] = _origins - if _stage != API_STAGE_DEFAULT: - kwargs["pubcheck"] = False - - # display plan debugging information - if _verbose > 2: - DebugValues.set_value("plan", "True") - - # plan the requested operation - stuff_to_do = None - - if _op == PKG_OP_ATTACH: - api_plan_func = _api_inst.gen_plan_attach - elif _op in [PKG_OP_CHANGE_FACET, PKG_OP_CHANGE_VARIANT]: - api_plan_func = _api_inst.gen_plan_change_varcets - elif _op == PKG_OP_DEHYDRATE: - api_plan_func = _api_inst.gen_plan_dehydrate - elif _op == PKG_OP_DETACH: - api_plan_func = _api_inst.gen_plan_detach - elif _op == PKG_OP_EXACT_INSTALL: - api_plan_func = _api_inst.gen_plan_exact_install - elif _op == PKG_OP_FIX: - api_plan_func = _api_inst.gen_plan_fix - elif _op == PKG_OP_INSTALL: - api_plan_func = _api_inst.gen_plan_install - elif _op == PKG_OP_REHYDRATE: - api_plan_func = _api_inst.gen_plan_rehydrate - elif _op == PKG_OP_REVERT: - api_plan_func = _api_inst.gen_plan_revert - elif _op == PKG_OP_SYNC: - api_plan_func = _api_inst.gen_plan_sync - elif _op == PKG_OP_UNINSTALL: - api_plan_func = _api_inst.gen_plan_uninstall - elif _op == PKG_OP_UPDATE: - api_plan_func = _api_inst.gen_plan_update - else: - raise RuntimeError("__api_plan() invalid op: {0}".format(_op)) + api_errors.InvalidResourceLocation, + ), + ): + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + error("\n" + str(e), cmd=op) + return EXIT_OOPS + if e_type == fmri.IllegalFmri: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + error("\n" + str(e), cmd=op) + return EXIT_OOPS + if isinstance(e, api_errors.SigningException): + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. 
+ error("\n" + str(e), cmd=op) + return EXIT_OOPS + if isinstance( + e, + ( + api_errors.UnsupportedVariantGlobbing, + api_errors.InvalidVarcetNames, + api_errors.UnsupportedFacetChange, + ), + ): + error(str(e), cmd=op) + return EXIT_OOPS + + # if we didn't deal with the exception above, pass it on. + raise + # NOTREACHED + + +def __api_plan( + _op, + _api_inst, + _accept=False, + _li_ignore=None, + _noexecute=False, + _omit_headers=False, + _origins=None, + _parsable_version=None, + _quiet=False, + _quiet_plan=False, + _review_release_notes=False, + _show_licenses=False, + _stage=API_STAGE_DEFAULT, + _verbose=0, + **kwargs, +): + """API plan invocation entry.""" + + # All the api interface functions that we invoke have some + # common arguments. Set those up now. + if _op not in ( + PKG_OP_REVERT, + PKG_OP_FIX, + PKG_OP_DEHYDRATE, + PKG_OP_REHYDRATE, + ): + kwargs["li_ignore"] = _li_ignore + kwargs["noexecute"] = _noexecute + if _origins: + kwargs["repos"] = _origins + if _stage != API_STAGE_DEFAULT: + kwargs["pubcheck"] = False + + # display plan debugging information + if _verbose > 2: + DebugValues.set_value("plan", "True") + + # plan the requested operation + stuff_to_do = None + + if _op == PKG_OP_ATTACH: + api_plan_func = _api_inst.gen_plan_attach + elif _op in [PKG_OP_CHANGE_FACET, PKG_OP_CHANGE_VARIANT]: + api_plan_func = _api_inst.gen_plan_change_varcets + elif _op == PKG_OP_DEHYDRATE: + api_plan_func = _api_inst.gen_plan_dehydrate + elif _op == PKG_OP_DETACH: + api_plan_func = _api_inst.gen_plan_detach + elif _op == PKG_OP_EXACT_INSTALL: + api_plan_func = _api_inst.gen_plan_exact_install + elif _op == PKG_OP_FIX: + api_plan_func = _api_inst.gen_plan_fix + elif _op == PKG_OP_INSTALL: + api_plan_func = _api_inst.gen_plan_install + elif _op == PKG_OP_REHYDRATE: + api_plan_func = _api_inst.gen_plan_rehydrate + elif _op == PKG_OP_REVERT: + api_plan_func = _api_inst.gen_plan_revert + elif _op == PKG_OP_SYNC: + api_plan_func = _api_inst.gen_plan_sync + elif _op == PKG_OP_UNINSTALL: + api_plan_func = _api_inst.gen_plan_uninstall + elif _op == PKG_OP_UPDATE: + api_plan_func = _api_inst.gen_plan_update + else: + raise RuntimeError("__api_plan() invalid op: {0}".format(_op)) + + planned_self = False + child_plans = [] + try: + for pd in api_plan_func(**kwargs): + if planned_self: + # we don't display anything for child images + # since they currently do their own display + # work (unless parsable output is requested). + child_plans.append(pd) + continue + + # the first plan description is always for ourself. + planned_self = True + pkg_timer.record("planning", logger=logger) + + # if we're in parsable mode don't display anything + # until after we finish planning for all children + if _parsable_version is None: + display_plan( + _api_inst, + [], + _noexecute, + _omit_headers, + _op, + _parsable_version, + _quiet, + _quiet_plan, + _show_licenses, + _stage, + _verbose, + ) - planned_self = False - child_plans = [] - try: - for pd in api_plan_func(**kwargs): - if planned_self: - # we don't display anything for child images - # since they currently do their own display - # work (unless parsable output is requested). - child_plans.append(pd) - continue - - # the first plan description is always for ourself. 
- planned_self = True - pkg_timer.record("planning", logger=logger) - - # if we're in parsable mode don't display anything - # until after we finish planning for all children - if _parsable_version is None: - display_plan(_api_inst, [], _noexecute, - _omit_headers, _op, _parsable_version, - _quiet, _quiet_plan, _show_licenses, _stage, - _verbose) - - # if requested accept licenses for child images. we - # have to do this before recursing into children. - if _accept: - accept_plan_licenses(_api_inst) - except: - rv = __api_plan_exception(_op, _noexecute, _verbose, _api_inst) - if rv != EXIT_OK: - pkg_timer.record("planning", logger=logger) - return rv - - if not planned_self: - # if we got an exception we didn't do planning for children - pkg_timer.record("planning", logger=logger) - - elif _api_inst.isparent(_li_ignore): - # if we didn't get an exception and we're a parent image then - # we should have done planning for child images. - pkg_timer.record("planning children", logger=logger) - - # if we didn't display our own plan (due to an exception), or if we're - # in parsable mode, then display our plan now. - if not planned_self or _parsable_version is not None: - try: - display_plan(_api_inst, child_plans, _noexecute, - _omit_headers, _op, _parsable_version, _quiet, - _quiet_plan, _show_licenses, _stage, _verbose) - except api_errors.ApiException as e: - error(e, cmd=_op) - return EXIT_OOPS - - # if we didn't accept licenses (due to an exception) then do that now. - if not planned_self and _accept: + # if requested accept licenses for child images. we + # have to do this before recursing into children. + if _accept: accept_plan_licenses(_api_inst) + except: + rv = __api_plan_exception(_op, _noexecute, _verbose, _api_inst) + if rv != EXIT_OK: + pkg_timer.record("planning", logger=logger) + return rv + + if not planned_self: + # if we got an exception we didn't do planning for children + pkg_timer.record("planning", logger=logger) + + elif _api_inst.isparent(_li_ignore): + # if we didn't get an exception and we're a parent image then + # we should have done planning for child images. + pkg_timer.record("planning children", logger=logger) + + # if we didn't display our own plan (due to an exception), or if we're + # in parsable mode, then display our plan now. + if not planned_self or _parsable_version is not None: + try: + display_plan( + _api_inst, + child_plans, + _noexecute, + _omit_headers, + _op, + _parsable_version, + _quiet, + _quiet_plan, + _show_licenses, + _stage, + _verbose, + ) + except api_errors.ApiException as e: + error(e, cmd=_op) + return EXIT_OOPS + + # if we didn't accept licenses (due to an exception) then do that now. 
+    if not planned_self and _accept:
+        accept_plan_licenses(_api_inst)
+
+    return EXIT_OK

-        return EXIT_OK

 def __api_plan_file(api_inst):
-        """Return the path to the PlanDescription save file."""
+    """Return the path to the PlanDescription save file."""
+
+    plandir = api_inst.img_plandir
+    return os.path.join(plandir, "plandesc")

-        plandir = api_inst.img_plandir
-        return os.path.join(plandir, "plandesc")

 def __api_plan_save(api_inst):
-        """Save an image plan to a file."""
+    """Save an image plan to a file."""
+
+    # get a pointer to the plan
+    plan = api_inst.describe()
+
+    # save the PlanDescription to a file
+    path = __api_plan_file(api_inst)
+    oflags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY
+    try:
+        fd = os.open(path, oflags, 0o644)
+        with os.fdopen(fd, "w") as fobj:
+            plan._save(fobj)
+
+        # cleanup any old style imageplan save files
+        for f in os.listdir(api_inst.img_plandir):
+            path = os.path.join(api_inst.img_plandir, f)
+            if re.search(r"^actions\.[0-9]+\.json$", f):
+                os.unlink(path)
+            if re.search(r"^pkgs\.[0-9]+\.json$", f):
+                os.unlink(path)
+    except OSError as e:
+        raise api_errors._convert_error(e)

-        # get a pointer to the plan
-        plan = api_inst.describe()
+    pkg_timer.record("saving plan", logger=logger)

-        # save the PlanDescription to a file
-        path = __api_plan_file(api_inst)
-        oflags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY
-        try:
-                fd = os.open(path, oflags, 0o644)
-                with os.fdopen(fd, "w") as fobj:
-                        plan._save(fobj)
-
-                # cleanup any old style imageplan save files
-                for f in os.listdir(api_inst.img_plandir):
-                        path = os.path.join(api_inst.img_plandir, f)
-                        if re.search(r"^actions\.[0-9]+\.json$", f):
-                                os.unlink(path)
-                        if re.search(r"^pkgs\.[0-9]+\.json$", f):
-                                os.unlink(path)
-        except OSError as e:
-                raise api_errors._convert_error(e)
-
-        pkg_timer.record("saving plan", logger=logger)

 def __api_plan_load(api_inst, stage, origins):
-        """Loan an image plan from a file."""
+    """Load an image plan from a file."""

-        # load an existing plan
-        path = __api_plan_file(api_inst)
-        plan = api.PlanDescription()
-        try:
-                with open(path) as fobj:
-                        plan._load(fobj)
-        except OSError as e:
-                raise api_errors._convert_error(e)
+    # load an existing plan
+    path = __api_plan_file(api_inst)
+    plan = api.PlanDescription()
+    try:
+        with open(path) as fobj:
+            plan._load(fobj)
+    except OSError as e:
+        raise api_errors._convert_error(e)

-        pkg_timer.record("loading plan", logger=logger)
+    pkg_timer.record("loading plan", logger=logger)

-        api_inst.reset()
-        api_inst.set_alt_repos(origins)
-        api_inst.load_plan(plan, prepared=(stage == API_STAGE_EXECUTE))
-        pkg_timer.record("re-initializing plan", logger=logger)
+    api_inst.reset()
+    api_inst.set_alt_repos(origins)
+    api_inst.load_plan(plan, prepared=(stage == API_STAGE_EXECUTE))
+    pkg_timer.record("re-initializing plan", logger=logger)
+
+    if stage == API_STAGE_EXECUTE:
+        __api_plan_delete(api_inst)

-        if stage == API_STAGE_EXECUTE:
-                __api_plan_delete(api_inst)

 def __api_plan_delete(api_inst):
-        """Delete an image plan file."""
+    """Delete an image plan file."""
+
+    path = __api_plan_file(api_inst)
+    try:
+        os.unlink(path)
+    except OSError as e:
+        raise api_errors._convert_error(e)

-        path = __api_plan_file(api_inst)
-        try:
-                os.unlink(path)
-        except OSError as e:
-                raise api_errors._convert_error(e)

 def _verify_exit_code(api_inst):
-        """Determine the exit code of pkg verify, which should be based on
-        whether we find errors."""
-
-        plan = api_inst.describe()
-        for item_id, parent_id, msg_time, msg_level, msg_type, msg_text in \
-
plan.gen_item_messages(): - if msg_type == MSG_ERROR: - return EXIT_OOPS - return EXIT_OK + """Determine the exit code of pkg verify, which should be based on + whether we find errors.""" + + plan = api_inst.describe() + for ( + item_id, + parent_id, + msg_time, + msg_level, + msg_type, + msg_text, + ) in plan.gen_item_messages(): + if msg_type == MSG_ERROR: + return EXIT_OOPS + return EXIT_OK + + +def __api_op( + _op, + _api_inst, + _accept=False, + _li_ignore=None, + _noexecute=False, + _omit_headers=False, + _origins=None, + _parsable_version=None, + _quiet=False, + _quiet_plan=False, + _review_release_notes=False, + _show_licenses=False, + _stage=API_STAGE_DEFAULT, + _verbose=0, + **kwargs, +): + """Do something that involves the api. + + Arguments prefixed with '_' are primarily used within this + function. All other arguments must be specified via keyword + assignment and will be passed directly on to the api + interfaces being invoked.""" + + if _stage in [API_STAGE_DEFAULT, API_STAGE_PLAN]: + # create a new plan + rv = __api_plan( + _op=_op, + _api_inst=_api_inst, + _accept=_accept, + _li_ignore=_li_ignore, + _noexecute=_noexecute, + _omit_headers=_omit_headers, + _origins=_origins, + _parsable_version=_parsable_version, + _quiet=_quiet, + _quiet_plan=_quiet_plan, + _review_release_notes=_review_release_notes, + _show_licenses=_show_licenses, + _stage=_stage, + _verbose=_verbose, + **kwargs, + ) -def __api_op(_op, _api_inst, _accept=False, _li_ignore=None, _noexecute=False, - _omit_headers=False, _origins=None, _parsable_version=None, _quiet=False, - _quiet_plan=False, _review_release_notes=False, _show_licenses=False, - _stage=API_STAGE_DEFAULT, _verbose=0, **kwargs): - """Do something that involves the api. - - Arguments prefixed with '_' are primarily used within this - function. All other arguments must be specified via keyword - assignment and will be passed directly on to the api - interfaces being invoked.""" - - if _stage in [API_STAGE_DEFAULT, API_STAGE_PLAN]: - # create a new plan - rv = __api_plan(_op=_op, _api_inst=_api_inst, - _accept=_accept, _li_ignore=_li_ignore, - _noexecute=_noexecute, _omit_headers=_omit_headers, - _origins=_origins, _parsable_version=_parsable_version, - _quiet=_quiet, _quiet_plan=_quiet_plan, - _review_release_notes=_review_release_notes, - _show_licenses=_show_licenses, _stage=_stage, - _verbose=_verbose, **kwargs) - - if rv != EXIT_OK: - return rv - if not _noexecute and _stage == API_STAGE_PLAN: - # We always save the plan, even if it is a noop. We - # do this because we want to be able to verify that we - # can load and execute a noop plan. (This mimics - # normal api behavior which doesn't prevent an api - # consumer from creating a noop plan and then - # preparing and executing it.) - __api_plan_save(_api_inst) - # for pkg verify - if _op == PKG_OP_FIX and _noexecute and _quiet_plan: - return _verify_exit_code(_api_inst) - if _api_inst.planned_nothingtodo(): - return EXIT_NOP - if _noexecute or _stage == API_STAGE_PLAN: - return EXIT_OK - else: - assert _stage in [API_STAGE_PREPARE, API_STAGE_EXECUTE] - __api_plan_load(_api_inst, _stage, _origins) + if rv != EXIT_OK: + return rv + if not _noexecute and _stage == API_STAGE_PLAN: + # We always save the plan, even if it is a noop. We + # do this because we want to be able to verify that we + # can load and execute a noop plan. (This mimics + # normal api behavior which doesn't prevent an api + # consumer from creating a noop plan and then + # preparing and executing it.) 
+ __api_plan_save(_api_inst) + # for pkg verify + if _op == PKG_OP_FIX and _noexecute and _quiet_plan: + return _verify_exit_code(_api_inst) + if _api_inst.planned_nothingtodo(): + return EXIT_NOP + if _noexecute or _stage == API_STAGE_PLAN: + return EXIT_OK + else: + assert _stage in [API_STAGE_PREPARE, API_STAGE_EXECUTE] + __api_plan_load(_api_inst, _stage, _origins) + + # Exceptions which happen here are printed in the above level, + # with or without some extra decoration done here. + if _stage in [API_STAGE_DEFAULT, API_STAGE_PREPARE]: + ret_code = __api_prepare_plan(_op, _api_inst) + pkg_timer.record("preparing", logger=logger) - # Exceptions which happen here are printed in the above level, - # with or without some extra decoration done here. - if _stage in [API_STAGE_DEFAULT, API_STAGE_PREPARE]: - ret_code = __api_prepare_plan(_op, _api_inst) - pkg_timer.record("preparing", logger=logger) + if ret_code != EXIT_OK: + return ret_code + if _stage == API_STAGE_PREPARE: + return EXIT_OK - if ret_code != EXIT_OK: - return ret_code - if _stage == API_STAGE_PREPARE: - return EXIT_OK + ret_code = __api_execute_plan(_op, _api_inst) + pkg_timer.record("executing", logger=logger) - ret_code = __api_execute_plan(_op, _api_inst) - pkg_timer.record("executing", logger=logger) + if ( + _review_release_notes + and ret_code == EXIT_OK + and _stage == API_STAGE_DEFAULT + and _api_inst.solaris_image() + ): + notes_block(misc.get_release_notes_url()) - if _review_release_notes and ret_code == EXIT_OK and \ - _stage == API_STAGE_DEFAULT and _api_inst.solaris_image(): - notes_block(misc.get_release_notes_url()) + return ret_code - return ret_code class RemoteDispatch(object): - """RPC Server Class which invoked by the PipedRPCServer when a RPC - request is recieved.""" - - def __dispatch(self, op, pwargs): - - pkg_timer.record("rpc dispatch wait", logger=logger) - - # if we were called with no arguments then pwargs will be [] - if pwargs == []: - pwargs = {} - - op_supported = [ - PKG_OP_AUDIT_LINKED, - PKG_OP_DETACH, - PKG_OP_PUBCHECK, - PKG_OP_SYNC, - PKG_OP_UPDATE, - PKG_OP_INSTALL, - PKG_OP_CHANGE_FACET, - PKG_OP_CHANGE_VARIANT, - PKG_OP_UNINSTALL, - PKG_OP_HOTFIX_CLEANUP - ] - if op not in op_supported: - raise Exception( - 'method "{0}" is not supported'.format(op)) - - # if a stage was specified, get it. - stage = pwargs.get("stage", API_STAGE_DEFAULT) - assert stage in api_stage_values + """RPC Server Class which invoked by the PipedRPCServer when a RPC + request is recieved.""" + + def __dispatch(self, op, pwargs): + pkg_timer.record("rpc dispatch wait", logger=logger) + + # if we were called with no arguments then pwargs will be [] + if pwargs == []: + pwargs = {} + + op_supported = [ + PKG_OP_AUDIT_LINKED, + PKG_OP_DETACH, + PKG_OP_PUBCHECK, + PKG_OP_SYNC, + PKG_OP_UPDATE, + PKG_OP_INSTALL, + PKG_OP_CHANGE_FACET, + PKG_OP_CHANGE_VARIANT, + PKG_OP_UNINSTALL, + PKG_OP_HOTFIX_CLEANUP, + ] + if op not in op_supported: + raise Exception('method "{0}" is not supported'.format(op)) - # if we're starting a new operation, reset the api. we do - # this just in case our parent updated our linked image - # metadata. - if stage in [API_STAGE_DEFAULT, API_STAGE_PLAN]: - _api_inst.reset() + # if a stage was specified, get it. + stage = pwargs.get("stage", API_STAGE_DEFAULT) + assert stage in api_stage_values - if "pargs" not in pwargs: - pwargs["pargs"] = [] + # if we're starting a new operation, reset the api. we do + # this just in case our parent updated our linked image + # metadata. 
+ if stage in [API_STAGE_DEFAULT, API_STAGE_PLAN]: + _api_inst.reset() - op_func = cmds[op][0] + if "pargs" not in pwargs: + pwargs["pargs"] = [] - rv = op_func(op, _api_inst, **pwargs) + op_func = cmds[op][0] - if DebugValues["timings"]: - msg(str(pkg_timer)) - pkg_timer.reset() + rv = op_func(op, _api_inst, **pwargs) - return rv + if DebugValues["timings"]: + msg(str(pkg_timer)) + pkg_timer.reset() - def _dispatch(self, op, pwargs): - """Primary RPC dispatch function. + return rv - This function must be kept super simple because if we take an - exception here then no output will be generated and this - package remote process will silently exit with a non-zero - return value (and the lack of an exception message makes this - failure very difficult to debug). Hence we wrap the real - remote dispatch routine with a call to handle_errors(), which - will catch and display any exceptions encountered.""" + def _dispatch(self, op, pwargs): + """Primary RPC dispatch function. + + This function must be kept super simple because if we take an + exception here then no output will be generated and this + package remote process will silently exit with a non-zero + return value (and the lack of an exception message makes this + failure very difficult to debug). Hence we wrap the real + remote dispatch routine with a call to handle_errors(), which + will catch and display any exceptions encountered.""" + + # flush output before and after every operation. + misc.flush_output() + misc.truncate_file(sys.stdout) + misc.truncate_file(sys.stderr) + rv = handle_errors(self.__dispatch, True, op, pwargs) + misc.flush_output() + return rv - # flush output before and after every operation. - misc.flush_output() - misc.truncate_file(sys.stdout) - misc.truncate_file(sys.stderr) - rv = handle_errors(self.__dispatch, True, op, pwargs) - misc.flush_output() - return rv def remote(op, api_inst, pargs, ctlfd): - """Execute commands from a remote pipe""" - - # - # this is kinda a gross hack. SocketServer.py uses select.select() - # which doesn't support file descriptors larger than FD_SETSIZE. - # Since ctlfd may have been allocated in a parent process with many - # file descriptors, it may be larger than FD_SETSIZE. Here in the - # child, though, the majority of those have been closed, so os.dup() - # should return a lower-numbered descriptor which will work with - # select.select(). 
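The comment above describes a genuine limitation: select.select() can only watch descriptors numbered below FD_SETSIZE (commonly 1024), and os.dup() always returns the lowest unused descriptor number, which is why duplicating and then closing the inherited descriptor makes it usable again. A small, self-contained illustration of that trick, independent of the pkg code (the pipe exists only to create a descriptor to play with):

import os

read_fd, write_fd = os.pipe()   # stand-in for a descriptor inherited from a parent

# os.dup() hands back the lowest unused descriptor; after closing the
# original, the copy is normally low enough for select.select() to accept.
low_fd = os.dup(read_fd)
os.close(read_fd)
print(low_fd)

os.close(low_fd)
os.close(write_fd)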
- # - ctlfd_new = os.dup(ctlfd) - os.close(ctlfd) - ctlfd = ctlfd_new - - rpc_server = pipeutils.PipedRPCServer(ctlfd) - rpc_server.register_introspection_functions() - rpc_server.register_instance(RemoteDispatch()) - - pkg_timer.record("rpc startup", logger=logger) - rpc_server.serve_forever() - -def change_variant(op, api_inst, pargs, - accept, act_timeout, backup_be, backup_be_name, be_activate, be_name, - li_ignore, li_parent_sync, li_erecurse, new_be, noexecute, origins, - parsable_version, quiet, refresh_catalogs, reject_pats, show_licenses, - stage, update_index, verbose): - """Attempt to change a variant associated with an image, updating - the image contents as necessary.""" - - xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op) - if not xrval: - return EXIT_OOPS - - if not pargs: - usage(_("{0}: no variants specified").format(op)) - - variants = dict() - for arg in pargs: - # '=' is not allowed in variant names or values - if (len(arg.split('=')) != 2): - usage(_("{0}: variants must to be of the form " - "'='.").format(op)) - - # get the variant name and value - name, value = arg.split('=') - if not name.startswith("variant."): - name = "variant.{0}".format(name) - - # forcibly lowercase for 'true' or 'false' - if not value.islower() and value.lower() in ("true", "false"): - value = value.lower() - - # make sure the user didn't specify duplicate variants - if name in variants: - usage(_("{subcmd}: duplicate variant specified: " - "{variant}").format(subcmd=op, variant=name)) - variants[name] = value - - return __api_op(op, api_inst, _accept=accept, _li_ignore=li_ignore, - _noexecute=noexecute, _origins=origins, - _parsable_version=parsable_version, _quiet=quiet, - _show_licenses=show_licenses, _stage=stage, _verbose=verbose, - act_timeout=act_timeout, backup_be=backup_be, - backup_be_name=backup_be_name, be_activate=be_activate, - be_name=be_name, li_erecurse=li_erecurse, - li_parent_sync=li_parent_sync, new_be=new_be, - refresh_catalogs=refresh_catalogs, reject_list=reject_pats, - update_index=update_index, variants=variants) - -def change_facet(op, api_inst, pargs, - accept, act_timeout, backup_be, backup_be_name, be_activate, be_name, - li_ignore, li_erecurse, li_parent_sync, new_be, noexecute, origins, - parsable_version, quiet, refresh_catalogs, reject_pats, show_licenses, - stage, update_index, verbose): - """Attempt to change the facets as specified, updating - image as necessary""" - - xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op) - if not xrval: - return EXIT_OOPS - - if not pargs: - usage(_("{0}: no facets specified").format(op)) - - facets = {} - allowed_values = { - "TRUE" : True, - "FALSE": False, - "NONE" : None - } - - for arg in pargs: - - # '=' is not allowed in facet names or values - if (len(arg.split('=')) != 2): - usage(_("{0}: facets must to be of the form " - "'facet....=[True|False|None]'").format(op)) - - # get the facet name and value - name, value = arg.split('=') - if not name.startswith("facet."): - name = "facet." + name - - if value.upper() not in allowed_values: - usage(_("{0}: facets must to be of the form " - "'facet....=[True|False|None]'.").format(op)) - - facets[name] = allowed_values[value.upper()] + """Execute commands from a remote pipe""" + + # + # this is kinda a gross hack. SocketServer.py uses select.select() + # which doesn't support file descriptors larger than FD_SETSIZE. + # Since ctlfd may have been allocated in a parent process with many + # file descriptors, it may be larger than FD_SETSIZE. 
Here in the + # child, though, the majority of those have been closed, so os.dup() + # should return a lower-numbered descriptor which will work with + # select.select(). + # + ctlfd_new = os.dup(ctlfd) + os.close(ctlfd) + ctlfd = ctlfd_new + + rpc_server = pipeutils.PipedRPCServer(ctlfd) + rpc_server.register_introspection_functions() + rpc_server.register_instance(RemoteDispatch()) + + pkg_timer.record("rpc startup", logger=logger) + rpc_server.serve_forever() + + +def change_variant( + op, + api_inst, + pargs, + accept, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + li_ignore, + li_parent_sync, + li_erecurse, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + stage, + update_index, + verbose, +): + """Attempt to change a variant associated with an image, updating + the image contents as necessary.""" + + xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op) + if not xrval: + return EXIT_OOPS + + if not pargs: + usage(_("{0}: no variants specified").format(op)) + + variants = dict() + for arg in pargs: + # '=' is not allowed in variant names or values + if len(arg.split("=")) != 2: + usage( + _( + "{0}: variants must to be of the form " "'='." + ).format(op) + ) + + # get the variant name and value + name, value = arg.split("=") + if not name.startswith("variant."): + name = "variant.{0}".format(name) + + # forcibly lowercase for 'true' or 'false' + if not value.islower() and value.lower() in ("true", "false"): + value = value.lower() + + # make sure the user didn't specify duplicate variants + if name in variants: + usage( + _("{subcmd}: duplicate variant specified: " "{variant}").format( + subcmd=op, variant=name + ) + ) + variants[name] = value + + return __api_op( + op, + api_inst, + _accept=accept, + _li_ignore=li_ignore, + _noexecute=noexecute, + _origins=origins, + _parsable_version=parsable_version, + _quiet=quiet, + _show_licenses=show_licenses, + _stage=stage, + _verbose=verbose, + act_timeout=act_timeout, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_activate=be_activate, + be_name=be_name, + li_erecurse=li_erecurse, + li_parent_sync=li_parent_sync, + new_be=new_be, + refresh_catalogs=refresh_catalogs, + reject_list=reject_pats, + update_index=update_index, + variants=variants, + ) + + +def change_facet( + op, + api_inst, + pargs, + accept, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + stage, + update_index, + verbose, +): + """Attempt to change the facets as specified, updating + image as necessary""" + + xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op) + if not xrval: + return EXIT_OOPS + + if not pargs: + usage(_("{0}: no facets specified").format(op)) + + facets = {} + allowed_values = {"TRUE": True, "FALSE": False, "NONE": None} + + for arg in pargs: + # '=' is not allowed in facet names or values + if len(arg.split("=")) != 2: + usage( + _( + "{0}: facets must to be of the form " + "'facet....=[True|False|None]'" + ).format(op) + ) + + # get the facet name and value + name, value = arg.split("=") + if not name.startswith("facet."): + name = "facet." + name + + if value.upper() not in allowed_values: + usage( + _( + "{0}: facets must to be of the form " + "'facet....=[True|False|None]'." 
+ ).format(op) + ) + + facets[name] = allowed_values[value.upper()] + + return __api_op( + op, + api_inst, + _accept=accept, + _li_ignore=li_ignore, + _noexecute=noexecute, + _origins=origins, + _parsable_version=parsable_version, + _quiet=quiet, + _show_licenses=show_licenses, + _stage=stage, + _verbose=verbose, + act_timeout=act_timeout, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_activate=be_activate, + be_name=be_name, + facets=facets, + li_erecurse=li_erecurse, + li_parent_sync=li_parent_sync, + new_be=new_be, + refresh_catalogs=refresh_catalogs, + reject_list=reject_pats, + update_index=update_index, + ) - return __api_op(op, api_inst, _accept=accept, _li_ignore=li_ignore, - _noexecute=noexecute, _origins=origins, - _parsable_version=parsable_version, _quiet=quiet, - _show_licenses=show_licenses, _stage=stage, _verbose=verbose, - act_timeout=act_timeout, backup_be=backup_be, - backup_be_name=backup_be_name, be_activate=be_activate, - be_name=be_name, facets=facets, li_erecurse=li_erecurse, - li_parent_sync=li_parent_sync, new_be=new_be, - refresh_catalogs=refresh_catalogs, reject_list=reject_pats, - update_index=update_index) def __handle_client_json_api_output(out_json, op, api_inst): - """This is the main client_json_api output handling function used for - install, update and uninstall and so on.""" + """This is the main client_json_api output handling function used for + install, update and uninstall and so on.""" - if "errors" in out_json: - _generate_error_messages(out_json["status"], - out_json["errors"], cmd=op) + if "errors" in out_json: + _generate_error_messages(out_json["status"], out_json["errors"], cmd=op) - if "data" in out_json and "release_notes_url" in out_json["data"]: - notes_block(out_json["data"]["release_notes_url"]) + if "data" in out_json and "release_notes_url" in out_json["data"]: + notes_block(out_json["data"]["release_notes_url"]) - if "data" in out_json and "repo_status" in out_json["data"]: - display_repo_failures(out_json["data"]["repo_status"]) + if "data" in out_json and "repo_status" in out_json["data"]: + display_repo_failures(out_json["data"]["repo_status"]) - __display_plan_messages(api_inst, frozenset([OP_STAGE_PREP, - OP_STAGE_EXEC])) - return out_json["status"] + __display_plan_messages(api_inst, frozenset([OP_STAGE_PREP, OP_STAGE_EXEC])) + return out_json["status"] -def _emit_error_general_cb(status, err, cmd=None, selected_type=[], - add_info=misc.EmptyDict): - """Callback for emitting general errors.""" - if status == EXIT_BADOPT: - # Usage errors are not in any specific type, print it only - # there is no selected type. - if not selected_type: - usage(err["reason"], cmd=cmd) - else: - return False - elif "errtype" in err: - if err["errtype"] == "format_update": - # if the selected_type is specified and err not in selected type, - # Don't print and return False. - if selected_type and err["errtype"] not in selected_type: - return False - emsg("\n") - emsg(err["reason"]) - emsg(_("To continue, execute 'pkg update-format' as a " - "privileged user and then try again. 
Please note " - "that updating the format of the image will render " - "it unusable with older versions of the pkg(7) " - "system.")) - elif err["errtype"] == "catalog_refresh": - if selected_type and err["errtype"] not in selected_type: - return False - - if "reason" in err: - emsg(err["reason"]) - elif "info" in err: - msg(err["info"]) - elif err["errtype"] == "catalog_refresh_failed": - if selected_type and err["errtype"] not in selected_type: - return False - - if "reason" in err: - emsg(" ") - emsg(err["reason"]) - elif err["errtype"] == "publisher_set": - if selected_type and err["errtype"] not in selected_type: - return False - - emsg(err["reason"]) - elif err["errtype"] == "plan_license": - if selected_type and err["errtype"] not in selected_type: - return False - - emsg(err["reason"]) - emsg(_("To indicate that you " - "agree to and accept the terms of the licenses of " - "the packages listed above, use the --accept " - "option. To display all of the related licenses, " - "use the --licenses option.")) - elif err["errtype"] in ["inventory", "inventory_extra"]: - if selected_type and err["errtype"] not in selected_type: - return False - - emsg(" ") - emsg(err["reason"]) - if err["errtype"] == "inventory_extra": - emsg("Use -af to allow all versions.") - elif err["errtype"] == "unsupported_repo_op": - if selected_type and err["errtype"] not in selected_type: - return False - - emsg(_(""" +def _emit_error_general_cb( + status, err, cmd=None, selected_type=[], add_info=misc.EmptyDict +): + """Callback for emitting general errors.""" + + if status == EXIT_BADOPT: + # Usage errors are not in any specific type, print it only + # there is no selected type. + if not selected_type: + usage(err["reason"], cmd=cmd) + else: + return False + elif "errtype" in err: + if err["errtype"] == "format_update": + # if the selected_type is specified and err not in selected type, + # Don't print and return False. + if selected_type and err["errtype"] not in selected_type: + return False + emsg("\n") + emsg(err["reason"]) + emsg( + _( + "To continue, execute 'pkg update-format' as a " + "privileged user and then try again. Please note " + "that updating the format of the image will render " + "it unusable with older versions of the pkg(7) " + "system." + ) + ) + elif err["errtype"] == "catalog_refresh": + if selected_type and err["errtype"] not in selected_type: + return False + + if "reason" in err: + emsg(err["reason"]) + elif "info" in err: + msg(err["info"]) + elif err["errtype"] == "catalog_refresh_failed": + if selected_type and err["errtype"] not in selected_type: + return False + + if "reason" in err: + emsg(" ") + emsg(err["reason"]) + elif err["errtype"] == "publisher_set": + if selected_type and err["errtype"] not in selected_type: + return False + + emsg(err["reason"]) + elif err["errtype"] == "plan_license": + if selected_type and err["errtype"] not in selected_type: + return False + + emsg(err["reason"]) + emsg( + _( + "To indicate that you " + "agree to and accept the terms of the licenses of " + "the packages listed above, use the --accept " + "option. To display all of the related licenses, " + "use the --licenses option." 
+ ) + ) + elif err["errtype"] in ["inventory", "inventory_extra"]: + if selected_type and err["errtype"] not in selected_type: + return False + + emsg(" ") + emsg(err["reason"]) + if err["errtype"] == "inventory_extra": + emsg("Use -af to allow all versions.") + elif err["errtype"] == "unsupported_repo_op": + if selected_type and err["errtype"] not in selected_type: + return False + + emsg( + _( + """ To add a publisher using this repository, execute the following command as a privileged user: pkg set-publisher -g {0} -""").format(add_info["repo_uri"])) - elif "info" in err: - msg(err["info"]) - elif "reason" in err: - emsg(err["reason"]) - else: - if selected_type: - return False - - if "reason" in err: - emsg(err["reason"]) - elif "info" in err: - msg(err["info"]) - return True - -def _generate_error_messages(status, err_list, - msg_cb=_emit_error_general_cb, selected_type=[], cmd=None, - add_info=misc.EmptyDict): - """Generate error messages.""" - - errs_left = [err for err in err_list if not msg_cb(status, err, - selected_type=selected_type, cmd=cmd, add_info=add_info)] - # Return errors not being printed. - return errs_left - -def exact_install(op, api_inst, pargs, - accept, backup_be, backup_be_name, be_activate, be_name, li_ignore, - li_parent_sync, new_be, noexecute, origins, parsable_version, quiet, - refresh_catalogs, reject_pats, show_licenses, update_index, verbose): - """Attempt to take package specified to INSTALLED state. - The operands are interpreted as glob patterns.""" - - out_json = client_api._exact_install(op, api_inst, pargs, accept, - backup_be, backup_be_name, be_activate, be_name, li_ignore, - li_parent_sync, new_be, noexecute, origins, parsable_version, - quiet, refresh_catalogs, reject_pats, show_licenses, update_index, - verbose, display_plan_cb=display_plan_cb, logger=logger) - - return __handle_client_json_api_output(out_json, op, api_inst) - -def install(op, api_inst, pargs, - accept, act_timeout, backup_be, backup_be_name, be_activate, be_name, - li_ignore, li_erecurse, li_parent_sync, new_be, noexecute, origins, - parsable_version, quiet, refresh_catalogs, reject_pats, show_licenses, - stage, update_index, verbose): - """Attempt to take package specified to INSTALLED state. The operands - are interpreted as glob patterns.""" - - out_json = client_api._install(op, api_inst, pargs, - accept, act_timeout, backup_be, backup_be_name, be_activate, - be_name, li_ignore, li_erecurse, li_parent_sync, new_be, noexecute, - origins, parsable_version, quiet, refresh_catalogs, reject_pats, - show_licenses, stage, update_index, verbose, - display_plan_cb=display_plan_cb, logger=logger) - - return __handle_client_json_api_output(out_json, op, api_inst) - -def update(op, api_inst, pargs, accept, act_timeout, backup_be, backup_be_name, - be_activate, be_name, force, ignore_missing, li_ignore, li_erecurse, - li_parent_sync, new_be, noexecute, origins, parsable_version, quiet, - refresh_catalogs, reject_pats, show_licenses, stage, update_index, verbose): - """Attempt to take all installed packages specified to latest - version.""" - - if verbose > 1: - # Special mode where we determine the latest version of each - # requested package, and pass that explicitly to the backend - # to elicit more meaningful error messages to aid with - # troubleshooting. 
- - msg("Retrieving package list...") - - packages = client_api._list_inventory(op='list', - api_inst=api_inst, pargs=pargs, li_parent_sync=False, - list_all=False, list_installed_newest=False, - list_newest=True, list_upgradable=True, - origins=set([]), quiet=True, refresh_catalogs=False); - - msg("Retrieving list of packages to update...") - - updates = client_api._list_inventory(op='list', - api_inst=api_inst, pargs=pargs, li_parent_sync=False, - list_all=False, list_installed_newest=False, - list_newest=False, list_upgradable=True, - origins=set([]), quiet=True, refresh_catalogs=False); - - if "data" in updates and "data" in packages: - - # Build dictionary of packages to the latest version - latestver = { - 'pkg://{0}/{1}'.format(e['pub'], e['pkg']) : - e['version'] - for e in packages['data'] - } - - # Build list of packages to update - updatelist = [ 'pkg://{0}/{1}'.format( - e['pub'], e['pkg']) - for e in updates['data'] ] - - pargs = [ - '{}@{}'.format(u, latestver[u]) - for u in updatelist - if u in latestver - ] - - out_json = client_api._update(op, api_inst, pargs, accept, act_timeout, - backup_be, backup_be_name, be_activate, be_name, force, - ignore_missing, li_ignore, li_erecurse, li_parent_sync, new_be, - noexecute, origins, parsable_version, quiet, refresh_catalogs, - reject_pats, show_licenses, stage, update_index, verbose, - display_plan_cb=display_plan_cb, logger=logger) - - return __handle_client_json_api_output(out_json, op, api_inst) - -def apply_hot_fix(**args): - """Attempt to install updates from specified hot-fix""" - - if not args['pargs']: - usage(_("Source URL or file must be specified"), cmd=args['op']) - - err = hotfix_cleanup(op=None, api_inst=args['api_inst'], pargs=None) - if err != EXIT_OK: - return err - - origin = misc.parse_uri(args['pargs'].pop(0), cwd=orig_cwd) - - if args['verbose']: - msg("Resolved URL to: {0}".format(origin)) - - base = os.path.basename(origin) - if not base.endswith('.p5p'): base = base + '.p5p' - tmp_fd, tmp_pth = tempfile.mkstemp(prefix='pkg_hfa_', suffix="_" + base) - tmpfiles.append(tmp_pth) - - ###################################################################### - # Get list of installed packages - - if args['verbose']: - msg("Retrieving installed package list...") - - out_json = client_api._list_inventory(op=PKG_OP_LIST, - api_inst=args['api_inst'], pargs='', li_parent_sync=False, - list_all=False, list_installed_newest=False, list_newest=False, - list_upgradable=False, origins=set([]), quiet=True, - refresh_catalogs=False); - - if "data" in out_json: - pkglist = [ 'pkg://{0}/{1}'.format(entry['pub'], entry['pkg']) - for entry in out_json["data"] ] - else: - error("Could not retrieve installed package list.") - return EXIT_OOPS - - ###################################################################### - # Find hot-fix archive - - if origin.startswith("file:///"): - filepath = urlparse(origin, "file", allow_fragments=0)[2] - try: - shutil.copy2(unquote(filepath), tmp_pth) - except Exception as e: - error(e) - return EXIT_OOPS - origin = misc.parse_uri(tmp_pth, cwd=orig_cwd) - elif origin.startswith("http://") or origin.startswith("https://") or \ - origin.startswith("ftp://"): - # Download file to temporary area - - if not args['quiet']: - msg("Downloading hot-fix from {0}".format(origin)) - if args['verbose']: - msg(" -> {0}".format(tmp_pth)) - - with os.fdopen(tmp_fd, "wb") as fh: - hdl = pycurl.Curl() - hdl.setopt(pycurl.URL, origin) - hdl.setopt(pycurl.WRITEDATA, fh) - hdl.setopt(pycurl.FAILONERROR, 1) - 
hdl.setopt(pycurl.CONNECTTIMEOUT, - global_settings.PKG_CLIENT_CONNECT_TIMEOUT) - #hdl.setopt(pycurl.VERBOSE, True) - hdl.setopt(pycurl.USERAGENT, - misc.user_agent_str(None, 'hotfix')) - if args['verbose']: - hdl.setopt(pycurl.NOPROGRESS, False) - try: - hdl.perform() - except pycurl.error as err: - errno, errstr = err - msg("An error occurred: {0}".format(errstr)) - return - - origin = misc.parse_uri(tmp_pth, cwd=orig_cwd) - - if not args['quiet']: - msg("Download complete.\n") - else: - usage(_("Invalid URL"), cmd=args['op']) - - ###################################################################### - # Determine packages held within the .p5p archive - - if args['verbose']: - msg("Scanning package archive...") - - # Create a transport & transport config - repo_uri = publisher.RepositoryURI(origin) - tmp_dir = tempfile.mkdtemp() - incoming_dir = tempfile.mkdtemp() - cache_dir = tempfile.mkdtemp() - tmpdirs.extend([tmp_dir, incoming_dir, cache_dir]) - - xport, xport_cfg = transport.setup_transport() - xport_cfg.add_cache(cache_dir, readonly=False) - xport_cfg.incoming_root = incoming_dir - xport_cfg.pkg_root = tmp_dir - - # Configure target publisher. - xpub = transport.setup_publisher(origin, "target", xport, xport_cfg) - pub_data = xport.get_publisherdata(xpub) - - updatelist = [] - prefix = None - for p in pub_data: - # Refresh publisher data - p.repository = xpub.repository - p.meta_root = tempfile.mkdtemp() - tmpdirs.append(p.meta_root) - p.transport = xport - p.refresh(True, True) - if prefix and p.prefix != prefix: - error("Hot-fix contains packages from multiple publishers") - return EXIT_OOPS - prefix = p.prefix - - cat = p.catalog - for f, states, attrs in cat.gen_packages(pubs=[p.prefix], - return_fmris=True): - fmri = f.get_fmri(include_build=False) - try: - bfmri = fmri.split('@')[0] - if bfmri in pkglist: - updatelist.append(fmri) - elif args['verbose']: - msg(" {0} not installed, skipping." - .format(bfmri)) - except: - pass - - if args['verbose']: - for pkg in updatelist: - msg(" {0} will be updated.".format(pkg)) - msg("") +""" + ).format(add_info["repo_uri"]) + ) + elif "info" in err: + msg(err["info"]) + elif "reason" in err: + emsg(err["reason"]) + else: + if selected_type: + return False + + if "reason" in err: + emsg(err["reason"]) + elif "info" in err: + msg(err["info"]) + return True + + +def _generate_error_messages( + status, + err_list, + msg_cb=_emit_error_general_cb, + selected_type=[], + cmd=None, + add_info=misc.EmptyDict, +): + """Generate error messages.""" + + errs_left = [ + err + for err in err_list + if not msg_cb( + status, err, selected_type=selected_type, cmd=cmd, add_info=add_info + ) + ] + # Return errors not being printed. + return errs_left + + +def exact_install( + op, + api_inst, + pargs, + accept, + backup_be, + backup_be_name, + be_activate, + be_name, + li_ignore, + li_parent_sync, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + update_index, + verbose, +): + """Attempt to take package specified to INSTALLED state. 
+ The operands are interpreted as glob patterns.""" + + out_json = client_api._exact_install( + op, + api_inst, + pargs, + accept, + backup_be, + backup_be_name, + be_activate, + be_name, + li_ignore, + li_parent_sync, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + update_index, + verbose, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + return __handle_client_json_api_output(out_json, op, api_inst) + + +def install( + op, + api_inst, + pargs, + accept, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + stage, + update_index, + verbose, +): + """Attempt to take package specified to INSTALLED state. The operands + are interpreted as glob patterns.""" + + out_json = client_api._install( + op, + api_inst, + pargs, + accept, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + stage, + update_index, + verbose, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + return __handle_client_json_api_output(out_json, op, api_inst) + + +def update( + op, + api_inst, + pargs, + accept, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + force, + ignore_missing, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + stage, + update_index, + verbose, +): + """Attempt to take all installed packages specified to latest + version.""" + + if verbose > 1: + # Special mode where we determine the latest version of each + # requested package, and pass that explicitly to the backend + # to elicit more meaningful error messages to aid with + # troubleshooting. 
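A compact sketch of the reshaping this special mode performs: one listing supplies the newest known version of every package, a second supplies the packages that can be upgraded, and the two are joined into explicit "fmri@version" operands. The entries below are hypothetical; the real code builds them from client_api._list_inventory() results, as shown next.

newest = [{"pub": "example.org", "pkg": "web/curl", "version": "8.5.0-1.0"}]
upgradable = [{"pub": "example.org", "pkg": "web/curl"}]

latest = {
    "pkg://{0}/{1}".format(e["pub"], e["pkg"]): e["version"] for e in newest
}
pargs = [
    "{}@{}".format(f, latest[f])
    for f in ("pkg://{0}/{1}".format(e["pub"], e["pkg"]) for e in upgradable)
    if f in latest
]
print(pargs)   # ['pkg://example.org/web/curl@8.5.0-1.0']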
+ + msg("Retrieving package list...") + + packages = client_api._list_inventory( + op="list", + api_inst=api_inst, + pargs=pargs, + li_parent_sync=False, + list_all=False, + list_installed_newest=False, + list_newest=True, + list_upgradable=True, + origins=set([]), + quiet=True, + refresh_catalogs=False, + ) - op = 'update' - if len(updatelist) < 1: - if not args['pargs']: - error("None of the packages in this hot-fix are installed.") - return EXIT_OOPS - op = 'install' - else: - args['pargs'] = updatelist - - ###################################################################### - # Add the hot-fix archive to the publisher - - pubargs = {} - - pubargs['api_inst'] = args['api_inst'] - pubargs['op'] = 'set-publisher' - pubargs['add_origins'] = set([origin]) - pubargs['pargs'] = [prefix] - - pubargs['ssl_key'] = None - pubargs['unset_ca_certs'] = [] - pubargs['approved_ca_certs'] = [] - pubargs['search_before'] = None - pubargs['sticky'] = None - pubargs['add_prop_values'] = {} - pubargs['revoked_ca_certs'] = [] - pubargs['search_first'] = False - pubargs['refresh_allowed'] = True - pubargs['add_mirrors'] = set([]) - pubargs['search_after'] = None - pubargs['disable'] = None - pubargs['set_props'] = {} - pubargs['reset_uuid'] = False - pubargs['remove_mirrors'] = set([]) - pubargs['ssl_cert'] = None - pubargs['remove_prop_values'] = {} - pubargs['proxy_uri'] = None - pubargs['origin_uri'] = None - pubargs['remove_origins'] = set([]) - pubargs['enable_origins'] = set([]) - pubargs['disable_origins'] = set([]) - pubargs['repo_uri'] = None - pubargs['unset_props'] = set([]) - pubargs['verbose'] = args['verbose'] - pubargs['li_erecurse'] = set([ - lin - for lin, rel, path in args['api_inst'].list_linked() - if rel == "child" - ]) - - publisher_set(**pubargs) - atexit.register(hotfix_cleanup, op=None, api_inst=args['api_inst'], - pargs=None) - - ###################################################################### - # Pass off to pkg update - - args['op'] = op - args['origins'] = set([]) - - # These are options for update which are not exposed for apply-hot-fix - # Set to default values for this transaction, except for 'force' which - # is true to allow hot-fixes to be applied to pkg itself. 
- args['parsable_version'] = None - args['accept'] = False - args['reject_pats'] = [] - args['act_timeout'] = 0 - args['refresh_catalogs'] = True - args['update_index'] = True - args['li_ignore'] = None - args['stage'] = 'default' - args['li_parent_sync'] = True - args['show_licenses'] = False - if op == 'update': - args['force'] = True - args['ignore_missing'] = False - - if op == 'install': - return install(**args) - else: - return update(**args) - -def uninstall(op, api_inst, pargs, - act_timeout, backup_be, backup_be_name, be_activate, be_name, - ignore_missing, li_ignore, li_erecurse, li_parent_sync, new_be, noexecute, - parsable_version, quiet, stage, update_index, verbose): - """Attempt to take package specified to DELETED state.""" - - out_json = client_api._uninstall(op, api_inst, pargs, - act_timeout, backup_be, backup_be_name, be_activate, be_name, - ignore_missing, li_ignore, li_erecurse, li_parent_sync, new_be, - noexecute, parsable_version, quiet, stage, update_index, verbose, - display_plan_cb=display_plan_cb, logger=logger) - - return __handle_client_json_api_output(out_json, op, api_inst) - -def autoremove(op, api_inst, pargs, - act_timeout, backup_be, backup_be_name, be_activate, be_name, - ignore_missing, li_ignore, li_erecurse, li_parent_sync, new_be, noexecute, - parsable_version, quiet, stage, update_index, verbose): - """Attempt to take automatically installed packages to DELETED state.""" - - if len(pargs): - usage(usage_error=_("No packages can be specified."), - cmd="autoremove") - - # Build pargs from the list of orphaned packages that were not - # manually installed. - - out_json = client_api._list_inventory(PKG_OP_LIST, api_inst, [], - li_parent_sync=False, list_all=False, - list_installed_newest=False, list_newest=False, - list_upgradable=False, origins=[], quiet=quiet, - refresh_catalogs=False, list_removable=True) - - errors = None - if "errors" in out_json: - _generate_error_messages(out_json["status"], - out_json["errors"]) - return out_json["status"] - - if "data" in out_json: - for entry in out_json["data"]: - if 'optional' in entry['states']: - continue - if 'manual' in entry['states']: - continue - - pargs.append(entry["pkg"]); - - if not pargs: - msg(_("No removable packages for this image.")) - return EXIT_NOP - - out_json = client_api._uninstall(PKG_OP_UNINSTALL, api_inst, pargs, - act_timeout, backup_be, backup_be_name, be_activate, be_name, - ignore_missing, li_ignore, li_erecurse, li_parent_sync, new_be, - noexecute, parsable_version, quiet, stage, update_index, verbose, - display_plan_cb=display_plan_cb, logger=logger) - - return __handle_client_json_api_output(out_json, op, api_inst) + msg("Retrieving list of packages to update...") + + updates = client_api._list_inventory( + op="list", + api_inst=api_inst, + pargs=pargs, + li_parent_sync=False, + list_all=False, + list_installed_newest=False, + list_newest=False, + list_upgradable=True, + origins=set([]), + quiet=True, + refresh_catalogs=False, + ) -def clean_image(api_inst, args): - opts, pargs = getopt.getopt(args, "v") - verbose = False - for opt, arg in opts: - if opt == "-v": - verbose = True - try: - api_inst.cleanup_cached_content(verbose=verbose) - return EXIT_OK - except: - return __api_plan_exception("clean", False, 0, api_inst) + if "data" in updates and "data" in packages: + # Build dictionary of packages to the latest version + latestver = { + "pkg://{0}/{1}".format(e["pub"], e["pkg"]): e["version"] + for e in packages["data"] + } + + # Build list of packages to update + 
updatelist = [ + "pkg://{0}/{1}".format(e["pub"], e["pkg"]) + for e in updates["data"] + ] + + pargs = [ + "{}@{}".format(u, latestver[u]) + for u in updatelist + if u in latestver + ] + + out_json = client_api._update( + op, + api_inst, + pargs, + accept, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + force, + ignore_missing, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + stage, + update_index, + verbose, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + return __handle_client_json_api_output(out_json, op, api_inst) -def verify(op, api_inst, pargs, omit_headers, parsable_version, quiet, verbose, - unpackaged, unpackaged_only, verify_paths): - """Determine if installed packages match manifests.""" - out_json = client_api._verify(op, api_inst, pargs, omit_headers, - parsable_version, quiet, verbose, unpackaged, unpackaged_only, - display_plan_cb=display_plan_cb, logger=logger, - verify_paths=verify_paths) +def apply_hot_fix(**args): + """Attempt to install updates from specified hot-fix""" + + if not args["pargs"]: + usage(_("Source URL or file must be specified"), cmd=args["op"]) + + err = hotfix_cleanup(op=None, api_inst=args["api_inst"], pargs=None) + if err != EXIT_OK: + return err + + origin = misc.parse_uri(args["pargs"].pop(0), cwd=orig_cwd) + + if args["verbose"]: + msg("Resolved URL to: {0}".format(origin)) + + base = os.path.basename(origin) + if not base.endswith(".p5p"): + base = base + ".p5p" + tmp_fd, tmp_pth = tempfile.mkstemp(prefix="pkg_hfa_", suffix="_" + base) + tmpfiles.append(tmp_pth) + + ###################################################################### + # Get list of installed packages + + if args["verbose"]: + msg("Retrieving installed package list...") + + out_json = client_api._list_inventory( + op=PKG_OP_LIST, + api_inst=args["api_inst"], + pargs="", + li_parent_sync=False, + list_all=False, + list_installed_newest=False, + list_newest=False, + list_upgradable=False, + origins=set([]), + quiet=True, + refresh_catalogs=False, + ) + + if "data" in out_json: + pkglist = [ + "pkg://{0}/{1}".format(entry["pub"], entry["pkg"]) + for entry in out_json["data"] + ] + else: + error("Could not retrieve installed package list.") + return EXIT_OOPS - # Print error messages. - if "errors" in out_json: - _generate_error_messages(out_json["status"], - out_json["errors"], cmd=op) + ###################################################################### + # Find hot-fix archive - # Since the verify output has been handled by display_plan_cb, only - # status code needs to be returned. 
- return out_json["status"] + if origin.startswith("file:///"): + filepath = urlparse(origin, "file", allow_fragments=0)[2] + try: + shutil.copy2(unquote(filepath), tmp_pth) + except Exception as e: + error(e) + return EXIT_OOPS + origin = misc.parse_uri(tmp_pth, cwd=orig_cwd) + elif ( + origin.startswith("http://") + or origin.startswith("https://") + or origin.startswith("ftp://") + ): + # Download file to temporary area + + if not args["quiet"]: + msg("Downloading hot-fix from {0}".format(origin)) + if args["verbose"]: + msg(" -> {0}".format(tmp_pth)) + + with os.fdopen(tmp_fd, "wb") as fh: + hdl = pycurl.Curl() + hdl.setopt(pycurl.URL, origin) + hdl.setopt(pycurl.WRITEDATA, fh) + hdl.setopt(pycurl.FAILONERROR, 1) + hdl.setopt( + pycurl.CONNECTTIMEOUT, + global_settings.PKG_CLIENT_CONNECT_TIMEOUT, + ) + # hdl.setopt(pycurl.VERBOSE, True) + hdl.setopt(pycurl.USERAGENT, misc.user_agent_str(None, "hotfix")) + if args["verbose"]: + hdl.setopt(pycurl.NOPROGRESS, False) + try: + hdl.perform() + except pycurl.error as err: + errno, errstr = err + msg("An error occurred: {0}".format(errstr)) + return -def revert(op, api_inst, pargs, - backup_be, backup_be_name, be_activate, be_name, new_be, noexecute, - parsable_version, quiet, tagged, verbose): - """Attempt to revert files to their original state, either - via explicit path names or via tagged contents.""" + origin = misc.parse_uri(tmp_pth, cwd=orig_cwd) + + if not args["quiet"]: + msg("Download complete.\n") + else: + usage(_("Invalid URL"), cmd=args["op"]) + + ###################################################################### + # Determine packages held within the .p5p archive + + if args["verbose"]: + msg("Scanning package archive...") + + # Create a transport & transport config + repo_uri = publisher.RepositoryURI(origin) + tmp_dir = tempfile.mkdtemp() + incoming_dir = tempfile.mkdtemp() + cache_dir = tempfile.mkdtemp() + tmpdirs.extend([tmp_dir, incoming_dir, cache_dir]) + + xport, xport_cfg = transport.setup_transport() + xport_cfg.add_cache(cache_dir, readonly=False) + xport_cfg.incoming_root = incoming_dir + xport_cfg.pkg_root = tmp_dir + + # Configure target publisher. 
+ xpub = transport.setup_publisher(origin, "target", xport, xport_cfg) + pub_data = xport.get_publisherdata(xpub) + + updatelist = [] + prefix = None + for p in pub_data: + # Refresh publisher data + p.repository = xpub.repository + p.meta_root = tempfile.mkdtemp() + tmpdirs.append(p.meta_root) + p.transport = xport + p.refresh(True, True) + if prefix and p.prefix != prefix: + error("Hot-fix contains packages from multiple publishers") + return EXIT_OOPS + prefix = p.prefix + + cat = p.catalog + for f, states, attrs in cat.gen_packages( + pubs=[p.prefix], return_fmris=True + ): + fmri = f.get_fmri(include_build=False) + try: + bfmri = fmri.split("@")[0] + if bfmri in pkglist: + updatelist.append(fmri) + elif args["verbose"]: + msg(" {0} not installed, skipping.".format(bfmri)) + except: + pass - if not pargs: - usage(_("at least one file path or tag name required"), cmd=op) + if args["verbose"]: + for pkg in updatelist: + msg(" {0} will be updated.".format(pkg)) + msg("") + + op = "update" + if len(updatelist) < 1: + if not args["pargs"]: + error("None of the packages in this hot-fix are installed.") + return EXIT_OOPS + op = "install" + else: + args["pargs"] = updatelist + + ###################################################################### + # Add the hot-fix archive to the publisher + + pubargs = {} + + pubargs["api_inst"] = args["api_inst"] + pubargs["op"] = "set-publisher" + pubargs["add_origins"] = set([origin]) + pubargs["pargs"] = [prefix] + + pubargs["ssl_key"] = None + pubargs["unset_ca_certs"] = [] + pubargs["approved_ca_certs"] = [] + pubargs["search_before"] = None + pubargs["sticky"] = None + pubargs["add_prop_values"] = {} + pubargs["revoked_ca_certs"] = [] + pubargs["search_first"] = False + pubargs["refresh_allowed"] = True + pubargs["add_mirrors"] = set([]) + pubargs["search_after"] = None + pubargs["disable"] = None + pubargs["set_props"] = {} + pubargs["reset_uuid"] = False + pubargs["remove_mirrors"] = set([]) + pubargs["ssl_cert"] = None + pubargs["remove_prop_values"] = {} + pubargs["proxy_uri"] = None + pubargs["origin_uri"] = None + pubargs["remove_origins"] = set([]) + pubargs["enable_origins"] = set([]) + pubargs["disable_origins"] = set([]) + pubargs["repo_uri"] = None + pubargs["unset_props"] = set([]) + pubargs["verbose"] = args["verbose"] + pubargs["li_erecurse"] = set( + [ + lin + for lin, rel, path in args["api_inst"].list_linked() + if rel == "child" + ] + ) + + publisher_set(**pubargs) + atexit.register( + hotfix_cleanup, op=None, api_inst=args["api_inst"], pargs=None + ) + + ###################################################################### + # Pass off to pkg update + + args["op"] = op + args["origins"] = set([]) + + # These are options for update which are not exposed for apply-hot-fix + # Set to default values for this transaction, except for 'force' which + # is true to allow hot-fixes to be applied to pkg itself. 
+ args["parsable_version"] = None + args["accept"] = False + args["reject_pats"] = [] + args["act_timeout"] = 0 + args["refresh_catalogs"] = True + args["update_index"] = True + args["li_ignore"] = None + args["stage"] = "default" + args["li_parent_sync"] = True + args["show_licenses"] = False + if op == "update": + args["force"] = True + args["ignore_missing"] = False + + if op == "install": + return install(**args) + else: + return update(**args) + + +def uninstall( + op, + api_inst, + pargs, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + ignore_missing, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + parsable_version, + quiet, + stage, + update_index, + verbose, +): + """Attempt to take package specified to DELETED state.""" + + out_json = client_api._uninstall( + op, + api_inst, + pargs, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + ignore_missing, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + parsable_version, + quiet, + stage, + update_index, + verbose, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + return __handle_client_json_api_output(out_json, op, api_inst) + + +def autoremove( + op, + api_inst, + pargs, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + ignore_missing, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + parsable_version, + quiet, + stage, + update_index, + verbose, +): + """Attempt to take automatically installed packages to DELETED state.""" + + if len(pargs): + usage(usage_error=_("No packages can be specified."), cmd="autoremove") + + # Build pargs from the list of orphaned packages that were not + # manually installed. + + out_json = client_api._list_inventory( + PKG_OP_LIST, + api_inst, + [], + li_parent_sync=False, + list_all=False, + list_installed_newest=False, + list_newest=False, + list_upgradable=False, + origins=[], + quiet=quiet, + refresh_catalogs=False, + list_removable=True, + ) + + errors = None + if "errors" in out_json: + _generate_error_messages(out_json["status"], out_json["errors"]) + return out_json["status"] - return __api_op(op, api_inst, _noexecute=noexecute, _quiet=quiet, - _verbose=verbose, backup_be=backup_be, be_activate=be_activate, - backup_be_name=backup_be_name, be_name=be_name, new_be=new_be, - _parsable_version=parsable_version, args=pargs, tagged=tagged) + if "data" in out_json: + for entry in out_json["data"]: + if "optional" in entry["states"]: + continue + if "manual" in entry["states"]: + continue -def dehydrate(op, api_inst, pargs, noexecute, publishers, quiet, verbose): - """Minimize image size for later redeployment.""" + pargs.append(entry["pkg"]) - return __api_op(op, api_inst, _noexecute=noexecute, _quiet=quiet, - _verbose=verbose, publishers=publishers) + if not pargs: + msg(_("No removable packages for this image.")) + return EXIT_NOP -def rehydrate(op, api_inst, pargs, noexecute, publishers, quiet, verbose): - """Restore content removed from a dehydrated image.""" + out_json = client_api._uninstall( + PKG_OP_UNINSTALL, + api_inst, + pargs, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + ignore_missing, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + parsable_version, + quiet, + stage, + update_index, + verbose, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + return __handle_client_json_api_output(out_json, op, api_inst) - return __api_op(op, api_inst, _noexecute=noexecute, _quiet=quiet, - 
_verbose=verbose, publishers=publishers) -def fix(op, api_inst, pargs, accept, backup_be, backup_be_name, be_activate, - be_name, new_be, noexecute, omit_headers, parsable_version, quiet, - show_licenses, verbose, unpackaged): - """Fix packaging errors found in the image.""" +def clean_image(api_inst, args): + opts, pargs = getopt.getopt(args, "v") + verbose = False + for opt, arg in opts: + if opt == "-v": + verbose = True + try: + api_inst.cleanup_cached_content(verbose=verbose) + return EXIT_OK + except: + return __api_plan_exception("clean", False, 0, api_inst) - out_json = client_api._fix(op, api_inst, pargs, accept, backup_be, - backup_be_name, be_activate, be_name, new_be, noexecute, - omit_headers, parsable_version, quiet, show_licenses, verbose, - unpackaged, display_plan_cb=display_plan_cb, logger=logger) - # Print error messages. - if "errors" in out_json: - _generate_error_messages(out_json["status"], - out_json["errors"], cmd=op) +def verify( + op, + api_inst, + pargs, + omit_headers, + parsable_version, + quiet, + verbose, + unpackaged, + unpackaged_only, + verify_paths, +): + """Determine if installed packages match manifests.""" + + out_json = client_api._verify( + op, + api_inst, + pargs, + omit_headers, + parsable_version, + quiet, + verbose, + unpackaged, + unpackaged_only, + display_plan_cb=display_plan_cb, + logger=logger, + verify_paths=verify_paths, + ) + + # Print error messages. + if "errors" in out_json: + _generate_error_messages(out_json["status"], out_json["errors"], cmd=op) + + # Since the verify output has been handled by display_plan_cb, only + # status code needs to be returned. + return out_json["status"] + + +def revert( + op, + api_inst, + pargs, + backup_be, + backup_be_name, + be_activate, + be_name, + new_be, + noexecute, + parsable_version, + quiet, + tagged, + verbose, +): + """Attempt to revert files to their original state, either + via explicit path names or via tagged contents.""" + + if not pargs: + usage(_("at least one file path or tag name required"), cmd=op) + + return __api_op( + op, + api_inst, + _noexecute=noexecute, + _quiet=quiet, + _verbose=verbose, + backup_be=backup_be, + be_activate=be_activate, + backup_be_name=backup_be_name, + be_name=be_name, + new_be=new_be, + _parsable_version=parsable_version, + args=pargs, + tagged=tagged, + ) - return out_json["status"] -def list_mediators(op, api_inst, pargs, omit_headers, output_format, - list_available): - """Display configured or available mediator version(s) and - implementation(s).""" +def dehydrate(op, api_inst, pargs, noexecute, publishers, quiet, verbose): + """Minimize image size for later redeployment.""" - subcommand = "mediator" - if output_format is None: - output_format = "default" + return __api_op( + op, + api_inst, + _noexecute=noexecute, + _quiet=quiet, + _verbose=verbose, + publishers=publishers, + ) - # mediator information is returned as a dictionary of dictionaries - # of version and implementation indexed by mediator name. - mediations = collections.defaultdict(list) - if list_available: - gen_mediators = api_inst.gen_available_mediators() - else: - # Configured mediator information - gen_mediators = ( - (mediator, mediation) - for mediator, mediation in six.iteritems(api_inst.mediators) - ) - # Set minimum widths for mediator and version columns by using the - # length of the column headers and values to be displayed. - mediators = set() - max_mname_len = len(_("MEDIATOR")) - max_vsrc_len = len(_("VER. 
SRC.")) - max_version_len = len(_("VERSION")) - max_isrc_len = len(_("IMPL. SRC.")) - for mname, values in gen_mediators: - max_mname_len = max(max_mname_len, len(mname)) - med_version = values.get("version", "") - max_version_len = max(max_version_len, len(med_version)) - mediators.add(mname) - mediations[mname].append(values) - - requested_mediators = set(pargs) - if requested_mediators: - found = mediators & requested_mediators - notfound = requested_mediators - found - else: - found = mediators - notfound = set() - - def gen_listing(): - for mediator, mediation in ( - (mname, mentry) - for mname in sorted(found) - for mentry in mediations[mname] - ): - med_impl = mediation.get("implementation") - med_impl_ver = mediation.get("implementation-version") - if output_format == "default" and med_impl and \ - med_impl_ver: - med_impl += "(@{0})".format(med_impl_ver) - yield { - "mediator": mediator, - "version": mediation.get("version"), - "version-source": mediation.get("version-source"), - "implementation": med_impl, - "implementation-source": mediation.get( - "implementation-source"), - "implementation-version": med_impl_ver, - } - - # MEDIATOR VER. SRC. VERSION IMPL. SRC. IMPLEMENTATION IMPL. VER. - # - # - # ... - field_data = { - "mediator" : [("default", "json", "tsv"), _("MEDIATOR"), ""], - "version" : [("default", "json", "tsv"), _("VERSION"), ""], - "version-source": [("default", "json", "tsv"), _("VER. SRC."), ""], - "implementation" : [("default", "json", "tsv"), _("IMPLEMENTATION"), - ""], - "implementation-source": [("default", "json", "tsv"), - _("IMPL. SRC."), ""], - "implementation-version" : [("json", "tsv"), _("IMPL. VER."), ""], - } - desired_field_order = (_("MEDIATOR"), _("VER. SRC."), _("VERSION"), - _("IMPL. SRC."), _("IMPLEMENTATION"), _("IMPL. VER.")) - - # Default output formatting. - def_fmt = "{0:" + str(max_mname_len) + "} {1:" + str(max_vsrc_len) + \ - "} {2:" + str(max_version_len) + "} {3:" + str(max_isrc_len) + \ - "} {4}" - - if api_inst.get_dehydrated_publishers(): - msg(_("WARNING: pkg mediators may not be accurately shown " - "when one or more publishers have been dehydrated. The " - "correct mediation will be applied when the publishers " - "are rehydrated.")) - - if found or (not requested_mediators and output_format == "default"): - sys.stdout.write(misc.get_listing(desired_field_order, - field_data, gen_listing(), output_format, def_fmt, - omit_headers, escape_output=False)) - - if found and notfound: - return EXIT_PARTIAL - if requested_mediators and not found: - if output_format == "default": - # Don't pollute other output formats. 
- error(_("no matching mediators found"), - cmd=subcommand) - return EXIT_OOPS - return EXIT_OK +def rehydrate(op, api_inst, pargs, noexecute, publishers, quiet, verbose): + """Restore content removed from a dehydrated image.""" + + return __api_op( + op, + api_inst, + _noexecute=noexecute, + _quiet=quiet, + _verbose=verbose, + publishers=publishers, + ) + + +def fix( + op, + api_inst, + pargs, + accept, + backup_be, + backup_be_name, + be_activate, + be_name, + new_be, + noexecute, + omit_headers, + parsable_version, + quiet, + show_licenses, + verbose, + unpackaged, +): + """Fix packaging errors found in the image.""" + + out_json = client_api._fix( + op, + api_inst, + pargs, + accept, + backup_be, + backup_be_name, + be_activate, + be_name, + new_be, + noexecute, + omit_headers, + parsable_version, + quiet, + show_licenses, + verbose, + unpackaged, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + # Print error messages. + if "errors" in out_json: + _generate_error_messages(out_json["status"], out_json["errors"], cmd=op) + + return out_json["status"] + + +def list_mediators( + op, api_inst, pargs, omit_headers, output_format, list_available +): + """Display configured or available mediator version(s) and + implementation(s).""" + + subcommand = "mediator" + if output_format is None: + output_format = "default" + + # mediator information is returned as a dictionary of dictionaries + # of version and implementation indexed by mediator name. + mediations = collections.defaultdict(list) + if list_available: + gen_mediators = api_inst.gen_available_mediators() + else: + # Configured mediator information + gen_mediators = ( + (mediator, mediation) + for mediator, mediation in six.iteritems(api_inst.mediators) + ) -def set_mediator(op, api_inst, pargs, - backup_be, backup_be_name, be_activate, be_name, med_implementation, - med_version, new_be, noexecute, parsable_version, quiet, update_index, - verbose): - """Set the version and/or implementation for the specified - mediator(s).""" + # Set minimum widths for mediator and version columns by using the + # length of the column headers and values to be displayed. + mediators = set() + max_mname_len = len(_("MEDIATOR")) + max_vsrc_len = len(_("VER. SRC.")) + max_version_len = len(_("VERSION")) + max_isrc_len = len(_("IMPL. SRC.")) + for mname, values in gen_mediators: + max_mname_len = max(max_mname_len, len(mname)) + med_version = values.get("version", "") + max_version_len = max(max_version_len, len(med_version)) + mediators.add(mname) + mediations[mname].append(values) + + requested_mediators = set(pargs) + if requested_mediators: + found = mediators & requested_mediators + notfound = requested_mediators - found + else: + found = mediators + notfound = set() + + def gen_listing(): + for mediator, mediation in ( + (mname, mentry) + for mname in sorted(found) + for mentry in mediations[mname] + ): + med_impl = mediation.get("implementation") + med_impl_ver = mediation.get("implementation-version") + if output_format == "default" and med_impl and med_impl_ver: + med_impl += "(@{0})".format(med_impl_ver) + yield { + "mediator": mediator, + "version": mediation.get("version"), + "version-source": mediation.get("version-source"), + "implementation": med_impl, + "implementation-source": mediation.get("implementation-source"), + "implementation-version": med_impl_ver, + } + + # MEDIATOR VER. SRC. VERSION IMPL. SRC. IMPLEMENTATION IMPL. VER. + # + # + # ... 
+ field_data = { + "mediator": [("default", "json", "tsv"), _("MEDIATOR"), ""], + "version": [("default", "json", "tsv"), _("VERSION"), ""], + "version-source": [("default", "json", "tsv"), _("VER. SRC."), ""], + "implementation": [("default", "json", "tsv"), _("IMPLEMENTATION"), ""], + "implementation-source": [ + ("default", "json", "tsv"), + _("IMPL. SRC."), + "", + ], + "implementation-version": [("json", "tsv"), _("IMPL. VER."), ""], + } + desired_field_order = ( + _("MEDIATOR"), + _("VER. SRC."), + _("VERSION"), + _("IMPL. SRC."), + _("IMPLEMENTATION"), + _("IMPL. VER."), + ) + + # Default output formatting. + def_fmt = ( + "{0:" + + str(max_mname_len) + + "} {1:" + + str(max_vsrc_len) + + "} {2:" + + str(max_version_len) + + "} {3:" + + str(max_isrc_len) + + "} {4}" + ) + + if api_inst.get_dehydrated_publishers(): + msg( + _( + "WARNING: pkg mediators may not be accurately shown " + "when one or more publishers have been dehydrated. The " + "correct mediation will be applied when the publishers " + "are rehydrated." + ) + ) - if not pargs: - usage(_("at least one mediator must be specified"), - cmd=op) - if not (med_version or med_implementation): - usage(_("a mediator version and/or implementation must be " - "specified using -V and -I"), cmd=op) + if found or (not requested_mediators and output_format == "default"): + sys.stdout.write( + misc.get_listing( + desired_field_order, + field_data, + gen_listing(), + output_format, + def_fmt, + omit_headers, + escape_output=False, + ) + ) - if verbose > 2: - DebugValues.set_value("plan", "True") - - # Now set version and/or implementation for all matching mediators. - # The user may specify 'None' as a special value to explicitly - # request mediations that do not have the related component. - mediators = collections.defaultdict(dict) - for m in pargs: - if med_version == "": - # Request reset of version. - mediators[m]["version"] = None - elif med_version == "None": - # Explicit selection of no version. - mediators[m]["version"] = "" - elif med_version: - mediators[m]["version"] = med_version - - if med_implementation == "": - # Request reset of implementation. - mediators[m]["implementation"] = None - elif med_implementation == "None": - # Explicit selection of no implementation. - mediators[m]["implementation"] = "" - elif med_implementation: - mediators[m]["implementation"] = med_implementation - - stuff_to_do = None - try: - for pd in api_inst.gen_plan_set_mediators(mediators, - noexecute=noexecute, backup_be=backup_be, - backup_be_name=backup_be_name, be_name=be_name, - new_be=new_be, be_activate=be_activate): - continue - stuff_to_do = not api_inst.planned_nothingtodo() - except: - ret_code = __api_plan_exception(op, api_inst, noexecute, - verbose) - if ret_code != EXIT_OK: - return ret_code - - if not stuff_to_do: - if verbose: - __display_plan(api_inst, verbose, noexecute) - if parsable_version is not None: - try: - __display_parsable_plan(api_inst, - parsable_version) - except api_errors.ApiException as e: - error(e, cmd=op) - return EXIT_OOPS - else: - msg(_("No changes required.")) - return EXIT_NOP + if found and notfound: + return EXIT_PARTIAL + if requested_mediators and not found: + if output_format == "default": + # Don't pollute other output formats. 
+ error(_("no matching mediators found"), cmd=subcommand) + return EXIT_OOPS + return EXIT_OK + + +def set_mediator( + op, + api_inst, + pargs, + backup_be, + backup_be_name, + be_activate, + be_name, + med_implementation, + med_version, + new_be, + noexecute, + parsable_version, + quiet, + update_index, + verbose, +): + """Set the version and/or implementation for the specified + mediator(s).""" + + if not pargs: + usage(_("at least one mediator must be specified"), cmd=op) + if not (med_version or med_implementation): + usage( + _( + "a mediator version and/or implementation must be " + "specified using -V and -I" + ), + cmd=op, + ) - if api_inst.get_dehydrated_publishers(): - msg(_("WARNING: pkg mediators may not be accurately shown " - "when one or more publishers have been dehydrated. The " - "correct mediation will be applied when the publishers " - "are rehydrated.")) + if verbose > 2: + DebugValues.set_value("plan", "True") + + # Now set version and/or implementation for all matching mediators. + # The user may specify 'None' as a special value to explicitly + # request mediations that do not have the related component. + mediators = collections.defaultdict(dict) + for m in pargs: + if med_version == "": + # Request reset of version. + mediators[m]["version"] = None + elif med_version == "None": + # Explicit selection of no version. + mediators[m]["version"] = "" + elif med_version: + mediators[m]["version"] = med_version + + if med_implementation == "": + # Request reset of implementation. + mediators[m]["implementation"] = None + elif med_implementation == "None": + # Explicit selection of no implementation. + mediators[m]["implementation"] = "" + elif med_implementation: + mediators[m]["implementation"] = med_implementation + + stuff_to_do = None + try: + for pd in api_inst.gen_plan_set_mediators( + mediators, + noexecute=noexecute, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_name=be_name, + new_be=new_be, + be_activate=be_activate, + ): + continue + stuff_to_do = not api_inst.planned_nothingtodo() + except: + ret_code = __api_plan_exception(op, api_inst, noexecute, verbose) + if ret_code != EXIT_OK: + return ret_code - if not quiet: - __display_plan(api_inst, verbose, noexecute) + if not stuff_to_do: + if verbose: + __display_plan(api_inst, verbose, noexecute) if parsable_version is not None: - try: - __display_parsable_plan(api_inst, parsable_version) - except api_errors.ApiException as e: - error(e, cmd=op) - return EXIT_OOPS + try: + __display_parsable_plan(api_inst, parsable_version) + except api_errors.ApiException as e: + error(e, cmd=op) + return EXIT_OOPS + else: + msg(_("No changes required.")) + return EXIT_NOP - if noexecute: - return EXIT_OK + if api_inst.get_dehydrated_publishers(): + msg( + _( + "WARNING: pkg mediators may not be accurately shown " + "when one or more publishers have been dehydrated. The " + "correct mediation will be applied when the publishers " + "are rehydrated." 
+ ) + ) - ret_code = __api_prepare_plan(op, api_inst) - if ret_code != EXIT_OK: - return ret_code + if not quiet: + __display_plan(api_inst, verbose, noexecute) + if parsable_version is not None: + try: + __display_parsable_plan(api_inst, parsable_version) + except api_errors.ApiException as e: + error(e, cmd=op) + return EXIT_OOPS - ret_code = __api_execute_plan(op, api_inst) + if noexecute: + return EXIT_OK + ret_code = __api_prepare_plan(op, api_inst) + if ret_code != EXIT_OK: return ret_code -def unset_mediator(op, api_inst, pargs, - backup_be, backup_be_name, be_activate, be_name, med_implementation, - med_version, new_be, noexecute, parsable_version, quiet, update_index, - verbose): - """Unset the version and/or implementation for the specified - mediator(s).""" + ret_code = __api_execute_plan(op, api_inst) + + return ret_code + + +def unset_mediator( + op, + api_inst, + pargs, + backup_be, + backup_be_name, + be_activate, + be_name, + med_implementation, + med_version, + new_be, + noexecute, + parsable_version, + quiet, + update_index, + verbose, +): + """Unset the version and/or implementation for the specified + mediator(s).""" + + if not pargs: + usage(_("at least one mediator must be specified"), cmd=op) + if verbose > 2: + DebugValues.set_value("plan", "True") + + # Build dictionary of mediators to unset based on input. + mediators = collections.defaultdict(dict) + if not (med_version or med_implementation): + # Unset both if nothing specific requested. + med_version = True + med_implementation = True + + # Now unset version and/or implementation for all matching mediators. + for m in pargs: + if med_version: + mediators[m]["version"] = None + if med_implementation: + mediators[m]["implementation"] = None + + stuff_to_do = None + try: + for pd in api_inst.gen_plan_set_mediators( + mediators, + noexecute=noexecute, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_name=be_name, + new_be=new_be, + be_activate=be_activate, + ): + continue + stuff_to_do = not api_inst.planned_nothingtodo() + except: + ret_code = __api_plan_exception(op, api_inst, noexecute, verbose) + if ret_code != EXIT_OK: + return ret_code - if not pargs: - usage(_("at least one mediator must be specified"), - cmd=op) - if verbose > 2: - DebugValues.set_value("plan", "True") - - # Build dictionary of mediators to unset based on input. - mediators = collections.defaultdict(dict) - if not (med_version or med_implementation): - # Unset both if nothing specific requested. - med_version = True - med_implementation = True - - # Now unset version and/or implementation for all matching mediators. 
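# --- Editorial sketch (not part of the patch) --------------------------------
# How set_mediator() above translates the -V/-I option values into the
# dictionary handed to gen_plan_set_mediators().  The helper name and the
# sample invocation are illustrative, not part of the pkg client.
import collections

def _translate(value):
    if value == "":      # option present but empty: request reset to default
        return None
    if value == "None":  # literal "None": explicitly select no version/impl.
        return ""
    return value         # anything else is used verbatim

# e.g. `pkg set-mediator -V 1.8 -I openjdk java` would roughly build:
mediators = collections.defaultdict(dict)
for name in ["java"]:
    mediators[name]["version"] = _translate("1.8")
    mediators[name]["implementation"] = _translate("openjdk")
# -> {"java": {"version": "1.8", "implementation": "openjdk"}}
# ------------------------------------------------------------------------------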
- for m in pargs: - if med_version: - mediators[m]["version"] = None - if med_implementation: - mediators[m]["implementation"] = None - - stuff_to_do = None + if not stuff_to_do: + if verbose: + __display_plan(api_inst, verbose, noexecute) + if parsable_version is not None: + try: + __display_parsable_plan(api_inst, parsable_version) + except api_errors.ApiException as e: + error(e, cmd=op) + return EXIT_OOPS + else: + msg(_("No changes required.")) + return EXIT_NOP + + if not quiet: + __display_plan(api_inst, verbose, noexecute) + if parsable_version is not None: try: - for pd in api_inst.gen_plan_set_mediators(mediators, - noexecute=noexecute, backup_be=backup_be, - backup_be_name=backup_be_name, be_name=be_name, - new_be=new_be, be_activate=be_activate): - continue - stuff_to_do = not api_inst.planned_nothingtodo() - except: - ret_code = __api_plan_exception(op, api_inst, noexecute, - verbose) - if ret_code != EXIT_OK: - return ret_code - - if not stuff_to_do: - if verbose: - __display_plan(api_inst, verbose, noexecute) - if parsable_version is not None: - try: - __display_parsable_plan(api_inst, - parsable_version) - except api_errors.ApiException as e: - error(e, cmd=op) - return EXIT_OOPS - else: - msg(_("No changes required.")) - return EXIT_NOP + __display_parsable_plan(api_inst, parsable_version) + except api_errors.ApiException as e: + error(e, cmd=op) + return EXIT_OOPS - if not quiet: - __display_plan(api_inst, verbose, noexecute) - if parsable_version is not None: - try: - __display_parsable_plan(api_inst, parsable_version) - except api_errors.ApiException as e: - error(e, cmd=op) - return EXIT_OOPS + if noexecute: + return EXIT_OK - if noexecute: - return EXIT_OK + ret_code = __api_prepare_plan(op, api_inst) + if ret_code != EXIT_OK: + return ret_code - ret_code = __api_prepare_plan(op, api_inst) - if ret_code != EXIT_OK: - return ret_code + ret_code = __api_execute_plan(op, api_inst) - ret_code = __api_execute_plan(op, api_inst) + return ret_code - return ret_code def avoid(api_inst, args): - """Place the specified packages on the avoid list""" - if not args: - return __display_avoids(api_inst) + """Place the specified packages on the avoid list""" + if not args: + return __display_avoids(api_inst) + + try: + api_inst.avoid_pkgs(args) + return EXIT_OK + except: + return __api_plan_exception("avoid", False, 0, api_inst) - try: - api_inst.avoid_pkgs(args) - return EXIT_OK - except: - return __api_plan_exception("avoid", False, 0, api_inst) def unavoid(api_inst, args): - """Remove the specified packages from the avoid list""" - if not args: - return __display_avoids(api_inst) + """Remove the specified packages from the avoid list""" + if not args: + return __display_avoids(api_inst) + + try: + api_inst.avoid_pkgs(args, unavoid=True) + return EXIT_OK + except: + return __api_plan_exception("unavoid", False, 0, api_inst) - try: - api_inst.avoid_pkgs(args, unavoid=True) - return EXIT_OK - except: - return __api_plan_exception("unavoid", False, 0, api_inst) def __display_avoids(api_inst): - """Display the current avoid list, and the pkgs that are tracking - that pkg""" - for a in api_inst.get_avoid_list(): - tracking = " ".join(a[1]) - if tracking: - logger.info(_( - " {avoid_pkg} (group dependency of " - "'{tracking_pkg}')") - .format(avoid_pkg=a[0], tracking_pkg=tracking)) - else: - logger.info(" {0}".format(a[0])) + """Display the current avoid list, and the pkgs that are tracking + that pkg""" + for a in api_inst.get_avoid_list(): + tracking = " ".join(a[1]) + if tracking: + 
logger.info( + _( + " {avoid_pkg} (group dependency of " "'{tracking_pkg}')" + ).format(avoid_pkg=a[0], tracking_pkg=tracking) + ) + else: + logger.info(" {0}".format(a[0])) + + return EXIT_OK - return EXIT_OK def freeze(api_inst, args): - """Place the specified packages on the frozen list""" - - opts, pargs = getopt.getopt(args, "Hc:n") - comment = None - display_headers = True - dry_run = False - for opt, arg in opts: - if opt == "-H": - display_headers = False - elif opt == "-c": - comment = arg - elif opt == "-n": - dry_run = True - - if comment and not pargs: - usage(usage_error=_("At least one package to freeze must be " - "given when -c is used."), cmd="freeze") - if not display_headers and pargs: - usage(usage_error=_("-H may only be specified when listing the " - "currently frozen packages.")) - if not pargs: - return __display_cur_frozen(api_inst, display_headers) + """Place the specified packages on the frozen list""" + + opts, pargs = getopt.getopt(args, "Hc:n") + comment = None + display_headers = True + dry_run = False + for opt, arg in opts: + if opt == "-H": + display_headers = False + elif opt == "-c": + comment = arg + elif opt == "-n": + dry_run = True + + if comment and not pargs: + usage( + usage_error=_( + "At least one package to freeze must be " + "given when -c is used." + ), + cmd="freeze", + ) + if not display_headers and pargs: + usage( + usage_error=_( + "-H may only be specified when listing the " + "currently frozen packages." + ) + ) + if not pargs: + return __display_cur_frozen(api_inst, display_headers) + + try: + pfmris = api_inst.freeze_pkgs(pargs, dry_run=dry_run, comment=comment) + for pfmri in pfmris: + vertext = pfmri.version.get_short_version() + ts = pfmri.version.get_timestamp() + if ts: + vertext += ":" + pfmri.version.timestr + logger.info( + _("{name} was frozen at {ver}").format( + name=pfmri.pkg_name, ver=vertext + ) + ) + return EXIT_OK + except api_errors.FreezePkgsException as e: + error("\n{0}".format(e), cmd="freeze") + return EXIT_OOPS + except: + return __api_plan_exception("freeze", False, 0, api_inst) - try: - pfmris = api_inst.freeze_pkgs(pargs, dry_run=dry_run, - comment=comment) - for pfmri in pfmris: - vertext = pfmri.version.get_short_version() - ts = pfmri.version.get_timestamp() - if ts: - vertext += ":" + pfmri.version.timestr - logger.info(_("{name} was frozen at {ver}").format( - name=pfmri.pkg_name, ver=vertext)) - return EXIT_OK - except api_errors.FreezePkgsException as e: - error("\n{0}".format(e), cmd="freeze") - return EXIT_OOPS - except: - return __api_plan_exception("freeze", False, 0, api_inst) def unfreeze(api_inst, args): - """Remove the specified packages from the frozen list""" + """Remove the specified packages from the frozen list""" + + opts, pargs = getopt.getopt(args, "Hn") + display_headers = True + dry_run = False + for opt, arg in opts: + if opt == "-H": + display_headers = False + elif opt == "-n": + dry_run = True + + if not pargs: + return __display_cur_frozen(api_inst, display_headers) + + try: + pkgs = api_inst.freeze_pkgs(pargs, unfreeze=True, dry_run=dry_run) + if not pkgs: + return EXIT_NOP + for s in pkgs: + logger.info(_("{0} was unfrozen.").format(s)) + return EXIT_OK + except: + return __api_plan_exception("unfreeze", False, 0, api_inst) - opts, pargs = getopt.getopt(args, "Hn") - display_headers = True - dry_run = False - for opt, arg in opts: - if opt == "-H": - display_headers = False - elif opt == "-n": - dry_run = True - if not pargs: - return __display_cur_frozen(api_inst, 
display_headers) +def __display_cur_frozen(api_inst, display_headers): + """Display the current frozen list""" + + try: + lst = sorted(api_inst.get_frozen_list()) + except api_errors.ApiException as e: + error(e) + return EXIT_OOPS + if len(lst) == 0: + return EXIT_OK - try: - pkgs = api_inst.freeze_pkgs(pargs, unfreeze=True, - dry_run=dry_run) - if not pkgs: - return EXIT_NOP - for s in pkgs: - logger.info(_("{0} was unfrozen.").format(s)) - return EXIT_OK - except: - return __api_plan_exception("unfreeze", False, 0, api_inst) + fmt = "{name:18} {ver:27} {time:24} {comment}" + if display_headers: + logger.info( + fmt.format( + name=_("NAME"), + ver=_("VERSION"), + time=_("DATE"), + comment=_("COMMENT"), + ) + ) -def __display_cur_frozen(api_inst, display_headers): - """Display the current frozen list""" + for pfmri, comment, timestamp in lst: + vertext = pfmri.version.get_short_version() + ts = pfmri.version.get_timestamp() + if ts: + vertext += ":" + pfmri.version.timestr + if not comment: + comment = "None" + logger.info( + fmt.format( + name=pfmri.pkg_name, + comment=comment, + time=time.strftime( + "%d %b %Y %H:%M:%S %Z", time.localtime(timestamp) + ), + ver=vertext, + ) + ) + return EXIT_OK - try: - lst = sorted(api_inst.get_frozen_list()) - except api_errors.ApiException as e: - error(e) - return EXIT_OOPS - if len(lst) == 0: - return EXIT_OK - - fmt = "{name:18} {ver:27} {time:24} {comment}" - if display_headers: - logger.info(fmt.format( - name=_("NAME"), - ver=_("VERSION"), - time=_("DATE"), - comment=_("COMMENT") - )) - - for pfmri, comment, timestamp in lst: - vertext = pfmri.version.get_short_version() - ts = pfmri.version.get_timestamp() - if ts: - vertext += ":" + pfmri.version.timestr - if not comment: - comment = "None" - logger.info(fmt.format( - name=pfmri.pkg_name, - comment=comment, - time=time.strftime("%d %b %Y %H:%M:%S %Z", - time.localtime(timestamp)), - ver=vertext - )) - return EXIT_OK def __convert_output(a_str, match): - """Converts a string to a three tuple with the information to fill - the INDEX, ACTION, and VALUE columns. + """Converts a string to a three tuple with the information to fill + the INDEX, ACTION, and VALUE columns. + + The "a_str" parameter is the string representation of an action. - The "a_str" parameter is the string representation of an action. + The "match" parameter is a string whose precise interpretation is given + below. - The "match" parameter is a string whose precise interpretation is given - below. + For most action types, match defines which attribute the query matched + with. For example, it states whether the basename or path attribute of + a file action matched the query. Attribute (set) actions are treated + differently because they only have one attribute, and many values + associated with that attribute. For those actions, the match parameter + states which value matched the query.""" - For most action types, match defines which attribute the query matched - with. For example, it states whether the basename or path attribute of - a file action matched the query. Attribute (set) actions are treated - differently because they only have one attribute, and many values - associated with that attribute. 
For those actions, the match parameter - states which value matched the query.""" + a = actions.fromstr(a_str.rstrip()) + if isinstance(a, actions.attribute.AttributeAction): + return a.attrs.get(a.key_attr), a.name, match + return match, a.name, a.attrs.get(a.key_attr) - a = actions.fromstr(a_str.rstrip()) - if isinstance(a, actions.attribute.AttributeAction): - return a.attrs.get(a.key_attr), a.name, match - return match, a.name, a.attrs.get(a.key_attr) def produce_matching_token(action, match): - """Given an action and a match value (see convert_output for more - details on this parameter), return the token which matched the query.""" - - if isinstance(action, actions.attribute.AttributeAction): - return match - if match == "basename": - return action.attrs.get("path") - r = action.attrs.get(match) - if r: - return r - return action.attrs.get(action.key_attr) + """Given an action and a match value (see convert_output for more + details on this parameter), return the token which matched the query.""" + + if isinstance(action, actions.attribute.AttributeAction): + return match + if match == "basename": + return action.attrs.get("path") + r = action.attrs.get(match) + if r: + return r + return action.attrs.get(action.key_attr) + def produce_matching_type(action, match): - """Given an action and a match value (see convert_output for more - details on this parameter), return the kind of match this was. For - example, if the query matched a portion of a path of an action, this - will return 'path'. If the action is an attribute action, it returns - the name set in the action. """ + """Given an action and a match value (see convert_output for more + details on this parameter), return the kind of match this was. For + example, if the query matched a portion of a path of an action, this + will return 'path'. If the action is an attribute action, it returns + the name set in the action.""" + + if not isinstance(action, actions.attribute.AttributeAction): + return match + return action.attrs.get("name") - if not isinstance(action, actions.attribute.AttributeAction): - return match - return action.attrs.get("name") def v1_extract_info(tup, return_type, pub): - """Given a result from search, massages the information into a form - useful for pkg.misc.list_actions_by_attrs. + """Given a result from search, massages the information into a form + useful for pkg.misc.list_actions_by_attrs. - The "return_type" parameter is an enumeration that describes the type - of the information that will be converted. + The "return_type" parameter is an enumeration that describes the type + of the information that will be converted. - The type of the "tup" parameter depends on the value of "return_type". - If "return_type" is action information, "tup" is a three-tuple of the - fmri name, the match, and a string representation of the action. In - the case where "return_type" is package information, "tup" is a one- - tuple containing the fmri name. + The type of the "tup" parameter depends on the value of "return_type". + If "return_type" is action information, "tup" is a three-tuple of the + fmri name, the match, and a string representation of the action. In + the case where "return_type" is package information, "tup" is a one- + tuple containing the fmri name. 
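# --- Editorial sketch (not part of the patch) --------------------------------
# The column ordering produced by __convert_output() above, reduced to plain
# Python.  For ordinary actions the matched attribute goes in the INDEX
# column; for set (attribute) actions the action's key attribute does, and the
# matched value moves to the VALUE column.  Sample values are made up.
def _convert_output_sketch(key_attr_value, action_name, match, is_attr_action):
    if is_attr_action:
        return key_attr_value, action_name, match
    return match, action_name, key_attr_value

# file action matched on its basename -> (INDEX, ACTION, VALUE)
assert _convert_output_sketch("usr/bin/ls", "file", "basename", False) == \
    ("basename", "file", "usr/bin/ls")
# set action matched on one of its values
assert _convert_output_sketch("pkg.summary", "set", "ls(1) clone", True) == \
    ("pkg.summary", "set", "ls(1) clone")
# ------------------------------------------------------------------------------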
- The "pub" parameter contains information about the publisher from which - the result was obtained.""" + The "pub" parameter contains information about the publisher from which + the result was obtained.""" - action = None - match = None - match_type = None + action = None + match = None + match_type = None + + if return_type == api.Query.RETURN_ACTIONS: + try: + pfmri, match, action = tup + except ValueError: + error( + _( + "The repository returned a malformed result.\n" + "The problematic structure:{0!r}" + ).format(tup) + ) + return False + try: + action = actions.fromstr(action.rstrip()) + except actions.ActionError as e: + error( + _( + "The repository returned an invalid or " + "unsupported action.\n{0}" + ).format(e) + ) + return False + match_type = produce_matching_type(action, match) + match = produce_matching_token(action, match) + else: + pfmri = tup + return pfmri, action, pub, match, match_type - if return_type == api.Query.RETURN_ACTIONS: - try: - pfmri, match, action = tup - except ValueError: - error(_("The repository returned a malformed result.\n" - "The problematic structure:{0!r}").format(tup)) - return False - try: - action = actions.fromstr(action.rstrip()) - except actions.ActionError as e: - error(_("The repository returned an invalid or " - "unsupported action.\n{0}").format(e)) - return False - match_type = produce_matching_type(action, match) - match = produce_matching_token(action, match) - else: - pfmri = tup - return pfmri, action, pub, match, match_type def search(api_inst, args): - """Search for the given query.""" - - # Constants which control the paging behavior for search output. - page_timeout = .5 - max_timeout = 5 - min_page_size = 5 - - search_attrs = valid_special_attrs[:] - search_attrs.extend(["search.match", "search.match_type"]) - - search_prefixes = valid_special_prefixes[:] - search_prefixes.extend(["search."]) - - opts, pargs = getopt.getopt(args, "Haflo:prs:I") - - default_attrs_action = ["search.match_type", "action.name", - "search.match", "pkg.shortfmri"] - - default_attrs_package = ["pkg.shortfmri", "pkg.publisher"] - - local = remote = case_sensitive = False - servers = [] - attrs = [] - - display_headers = True - prune_versions = True - return_actions = True - use_default_attrs = True - - for opt, arg in opts: - if opt == "-H": - display_headers = False - elif opt == "-a": - return_actions = True - elif opt == "-f": - prune_versions = False - elif opt == "-l": - local = True - elif opt == "-o": - attrs.extend(arg.split(",")) - use_default_attrs = False - elif opt == "-p": - return_actions = False - elif opt == "-r": - remote = True - elif opt == "-s": - remote = True - servers.append({ - "origin": misc.parse_uri(arg, cwd=orig_cwd) }) - elif opt == "-I": - case_sensitive = True - - if not local and not remote: - remote = True - - if not pargs: - usage(_("at least one search term must be provided"), - cmd="search") - - check_attrs(attrs, "search", reference=search_attrs, - prefixes=search_prefixes) - - action_attr = False - for a in attrs: - if a.startswith("action.") or a.startswith("search.match"): - action_attr = True - if not return_actions: - usage(_("action level options ('{0}') to -o " - "cannot be used with the -p " - "option").format(a), cmd="search") - break + """Search for the given query.""" + + # Constants which control the paging behavior for search output. 
+ page_timeout = 0.5 + max_timeout = 5 + min_page_size = 5 + + search_attrs = valid_special_attrs[:] + search_attrs.extend(["search.match", "search.match_type"]) + + search_prefixes = valid_special_prefixes[:] + search_prefixes.extend(["search."]) + + opts, pargs = getopt.getopt(args, "Haflo:prs:I") + + default_attrs_action = [ + "search.match_type", + "action.name", + "search.match", + "pkg.shortfmri", + ] + + default_attrs_package = ["pkg.shortfmri", "pkg.publisher"] + + local = remote = case_sensitive = False + servers = [] + attrs = [] + + display_headers = True + prune_versions = True + return_actions = True + use_default_attrs = True + + for opt, arg in opts: + if opt == "-H": + display_headers = False + elif opt == "-a": + return_actions = True + elif opt == "-f": + prune_versions = False + elif opt == "-l": + local = True + elif opt == "-o": + attrs.extend(arg.split(",")) + use_default_attrs = False + elif opt == "-p": + return_actions = False + elif opt == "-r": + remote = True + elif opt == "-s": + remote = True + servers.append({"origin": misc.parse_uri(arg, cwd=orig_cwd)}) + elif opt == "-I": + case_sensitive = True + + if not local and not remote: + remote = True + + if not pargs: + usage(_("at least one search term must be provided"), cmd="search") + + check_attrs( + attrs, "search", reference=search_attrs, prefixes=search_prefixes + ) + + action_attr = False + for a in attrs: + if a.startswith("action.") or a.startswith("search.match"): + action_attr = True + if not return_actions: + usage( + _( + "action level options ('{0}') to -o " + "cannot be used with the -p " + "option" + ).format(a), + cmd="search", + ) + break - searches = [] + searches = [] - # Strip pkg:/ or pkg:/// from the fmri. - # If fmri has pkg:// then strip the prefix - # from 'pkg://' upto the first slash. + # Strip pkg:/ or pkg:/// from the fmri. + # If fmri has pkg:// then strip the prefix + # from 'pkg://' upto the first slash. - qtext = re.sub(r"pkg:///|pkg://[^/]*/|pkg:/", "", " ".join(pargs)) + qtext = re.sub(r"pkg:///|pkg://[^/]*/|pkg:/", "", " ".join(pargs)) - try: - query = [api.Query(qtext, case_sensitive, - return_actions)] - except api_errors.BooleanQueryException as e: - error(e) - return EXIT_OOPS - except api_errors.ParseError as e: - error(e) - return EXIT_OOPS + try: + query = [api.Query(qtext, case_sensitive, return_actions)] + except api_errors.BooleanQueryException as e: + error(e) + return EXIT_OOPS + except api_errors.ParseError as e: + error(e) + return EXIT_OOPS - good_res = False - bad_res = False + good_res = False + bad_res = False - try: - if local: - searches.append(api_inst.local_search(query)) - if remote: - searches.append(api_inst.remote_search(query, - servers=servers, prune_versions=prune_versions)) - # By default assume we don't find anything. - retcode = EXIT_OOPS - - # get initial set of results - justs = calc_justs(attrs) - page_again = True - widths = [] - st = None - err = None - header_attrs = attrs - last_line = None - shown_headers = False - while page_again: - unprocessed_res = [] - page_again = False - # Indexless search raises a slow search exception. In - # that case, catch the exception, finish processing the - # results, then propogate the error. 
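# --- Editorial sketch (not part of the patch) --------------------------------
# The paging heuristic search() uses above, reduced to a standalone helper:
# results are buffered until either the source is exhausted or a page's worth
# has arrived after page_timeout seconds; if a timeout fires with too few
# rows, the timeout is doubled (capped at max_timeout) instead of paging.
# The constants mirror those in search(); "results" is any iterable stand-in,
# and this is only an approximation of the real loop.
import time

max_timeout = 5
min_page_size = 5

def drain_one_page(results, page_timeout):
    st = time.time()
    page = []
    for r in results:
        page.append(r)
        if time.time() - st > page_timeout:
            if len(page) > min_page_size:
                return page, page_timeout, True   # emit page, more to come
            page_timeout = min(page_timeout * 2, max_timeout)
    return page, page_timeout, False              # source exhausted
# ------------------------------------------------------------------------------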
- try: - for raw_value in itertools.chain(*searches): - if not st: - st = time.time() - try: - query_num, pub, \ - (v, return_type, tmp) = \ - raw_value - except ValueError as e: - error(_("The repository " - "returned a malformed " - "result:{0!r}").format( - raw_value)) - bad_res = True - continue - # This check is necessary since a - # a pacakge search can be specified - # using the <> operator. - if action_attr and \ - return_type != \ - api.Query.RETURN_ACTIONS: - usage(_("action level options " - "to -o cannot be used with " - "the queries that return " - "packages"), cmd="search") - if use_default_attrs and not justs: - if return_type == \ - api.Query.RETURN_ACTIONS: - attrs = \ - default_attrs_action - header_attrs = \ - ["index", "action", - "value", "package"] - else: - attrs = default_attrs_package - header_attrs = \ - ["package", - "publisher"] - justs = calc_justs(attrs) - ret = v1_extract_info( - tmp, return_type, pub) - bad_res |= isinstance(ret, bool) - if ret: - good_res = True - unprocessed_res.append(ret) - # Check whether the paging timeout - # should be increased. - if time.time() - st > page_timeout: - if len(unprocessed_res) > \ - min_page_size: - page_again = True - break - else: - page_timeout = min( - page_timeout * 2, - max_timeout) - except api_errors.ApiException as e: - err = e - lines = list(misc.list_actions_by_attrs(unprocessed_res, - attrs, show_all=True, remove_consec_dup_lines=True, - last_res=last_line)) - if not lines: - continue - old_widths = widths[:] - widths = calc_widths(lines, attrs, widths) - # If headers are being displayed and the layout of the - # columns have changed, print the headers again using - # the new widths. - if display_headers and (not shown_headers or - old_widths[:-1] != widths[:-1]): - shown_headers = True - print_headers(header_attrs, widths, justs) - for line in lines: - msg((create_output_format(display_headers, - widths, justs, line).format( - *line)).rstrip()) - last_line = line - st = time.time() - if err: - raise err + try: + if local: + searches.append(api_inst.local_search(query)) + if remote: + searches.append( + api_inst.remote_search( + query, servers=servers, prune_versions=prune_versions + ) + ) + # By default assume we don't find anything. + retcode = EXIT_OOPS + # get initial set of results + justs = calc_justs(attrs) + page_again = True + widths = [] + st = None + err = None + header_attrs = attrs + last_line = None + shown_headers = False + while page_again: + unprocessed_res = [] + page_again = False + # Indexless search raises a slow search exception. In + # that case, catch the exception, finish processing the + # results, then propogate the error. + try: + for raw_value in itertools.chain(*searches): + if not st: + st = time.time() + try: + query_num, pub, (v, return_type, tmp) = raw_value + except ValueError as e: + error( + _( + "The repository " + "returned a malformed " + "result:{0!r}" + ).format(raw_value) + ) + bad_res = True + continue + # This check is necessary since a + # a pacakge search can be specified + # using the <> operator. 
+ if action_attr and return_type != api.Query.RETURN_ACTIONS: + usage( + _( + "action level options " + "to -o cannot be used with " + "the queries that return " + "packages" + ), + cmd="search", + ) + if use_default_attrs and not justs: + if return_type == api.Query.RETURN_ACTIONS: + attrs = default_attrs_action + header_attrs = [ + "index", + "action", + "value", + "package", + ] + else: + attrs = default_attrs_package + header_attrs = ["package", "publisher"] + justs = calc_justs(attrs) + ret = v1_extract_info(tmp, return_type, pub) + bad_res |= isinstance(ret, bool) + if ret: + good_res = True + unprocessed_res.append(ret) + # Check whether the paging timeout + # should be increased. + if time.time() - st > page_timeout: + if len(unprocessed_res) > min_page_size: + page_again = True + break + else: + page_timeout = min(page_timeout * 2, max_timeout) + except api_errors.ApiException as e: + err = e + lines = list( + misc.list_actions_by_attrs( + unprocessed_res, + attrs, + show_all=True, + remove_consec_dup_lines=True, + last_res=last_line, + ) + ) + if not lines: + continue + old_widths = widths[:] + widths = calc_widths(lines, attrs, widths) + # If headers are being displayed and the layout of the + # columns have changed, print the headers again using + # the new widths. + if display_headers and ( + not shown_headers or old_widths[:-1] != widths[:-1] + ): + shown_headers = True + print_headers(header_attrs, widths, justs) + for line in lines: + msg( + ( + create_output_format( + display_headers, widths, justs, line + ).format(*line) + ).rstrip() + ) + last_line = line + st = time.time() + if err: + raise err + + except ( + api_errors.IncorrectIndexFileHash, + api_errors.InconsistentIndexException, + ): + error( + _( + "The search index appears corrupted. Please " + "rebuild the index with 'pkg rebuild-index'." + ) + ) + return EXIT_OOPS + except api_errors.ProblematicSearchServers as e: + error(e) + bad_res = True + except api_errors.SlowSearchUsed as e: + error(e) + except ( + api_errors.IncorrectIndexFileHash, + api_errors.InconsistentIndexException, + ): + error( + _( + "The search index appears corrupted. Please " + "rebuild the index with 'pkg rebuild-index'." + ) + ) + return EXIT_OOPS + except api_errors.ImageFormatUpdateNeeded as e: + format_update_error(e) + return EXIT_OOPS + except api_errors.ApiException as e: + error(e) + return EXIT_OOPS + if good_res and bad_res: + retcode = EXIT_PARTIAL + elif bad_res: + retcode = EXIT_OOPS + elif good_res: + retcode = EXIT_OK + return retcode - except (api_errors.IncorrectIndexFileHash, - api_errors.InconsistentIndexException): - error(_("The search index appears corrupted. Please " - "rebuild the index with 'pkg rebuild-index'.")) - return EXIT_OOPS - except api_errors.ProblematicSearchServers as e: - error(e) - bad_res = True - except api_errors.SlowSearchUsed as e: - error(e) - except (api_errors.IncorrectIndexFileHash, - api_errors.InconsistentIndexException): - error(_("The search index appears corrupted. Please " - "rebuild the index with 'pkg rebuild-index'.")) - return EXIT_OOPS - except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - return EXIT_OOPS - except api_errors.ApiException as e: - error(e) - return EXIT_OOPS - if good_res and bad_res: - retcode = EXIT_PARTIAL - elif bad_res: - retcode = EXIT_OOPS - elif good_res: - retcode = EXIT_OK - return retcode def flag(api_inst, args): - """Flag/unflag installed packages in the local image. 
- """ - - opts, pargs = getopt.getopt(args, "Mm") - flag = None - for opt, arg in opts: - if opt == "-M": - flag = 'manual' - value = False - if opt == "-m": - flag = 'manual' - value = True - - if not flag: - usage(usage_error="One of -m or -M must be provided.", - cmd="flag") - - if not len(pargs): - usage(usage_error=_("At least one package to update must be " - "provided."), cmd="flag") + """Flag/unflag installed packages in the local image.""" + + opts, pargs = getopt.getopt(args, "Mm") + flag = None + for opt, arg in opts: + if opt == "-M": + flag = "manual" + value = False + if opt == "-m": + flag = "manual" + value = True + + if not flag: + usage(usage_error="One of -m or -M must be provided.", cmd="flag") + + if not len(pargs): + usage( + usage_error=_( + "At least one package to update must be " "provided." + ), + cmd="flag", + ) + + try: + api_inst.flag_pkgs(pargs, flag=flag, value=value) + return EXIT_OK + except: + return __api_plan_exception("flag", False, 0, api_inst) + + +def info( + op, + api_inst, + pargs, + display_license, + info_local, + info_remote, + origins, + quiet, +): + """Display information about a package or packages.""" + + ret_json = client_api._info( + op, + api_inst, + pargs, + display_license, + info_local, + info_remote, + origins, + quiet, + ) + + if "data" in ret_json: + # display_license is true. + if "licenses" in ret_json["data"]: + data_type = "licenses" + elif "package_attrs" in ret_json["data"]: + data_type = "package_attrs" + + for i, pis in enumerate(ret_json["data"][data_type]): + if not quiet and i > 0: + msg("") + + if display_license and not quiet: + for lic in pis[1]: + msg(lic) + continue + + try: + max_width = max(len(attr[0]) for attr in pis) + except ValueError: + # Only display header if there are + # other attributes to show. + continue + for attr_l in pis: + attr, kval = tuple(attr_l) + label = "{0}: ".format(attr.rjust(max_width)) + res = "\n".join(item for item in kval) + if res: + wrapper = textwrap.TextWrapper( + initial_indent=label, + break_on_hyphens=False, + break_long_words=False, + subsequent_indent=(max_width + 2) * " ", + width=80, + ) + msg(wrapper.fill(res)) + + if "errors" in ret_json: + _generate_error_messages( + ret_json["status"], ret_json["errors"], cmd="info" + ) + + return ret_json["status"] - try: - api_inst.flag_pkgs(pargs, flag=flag, value=value) - return EXIT_OK - except: - return __api_plan_exception("flag", False, 0, api_inst) - -def info(op, api_inst, pargs, display_license, info_local, info_remote, - origins, quiet): - """Display information about a package or packages. - """ - - ret_json = client_api._info(op, api_inst, pargs, display_license, - info_local, info_remote, origins, quiet) - - if "data" in ret_json: - # display_license is true. - if "licenses" in ret_json["data"]: - data_type = "licenses" - elif "package_attrs" in ret_json["data"]: - data_type = "package_attrs" - - for i, pis in enumerate(ret_json["data"][data_type]): - if not quiet and i > 0: - msg("") - - if display_license and not quiet: - for lic in pis[1]: - msg(lic) - continue - - try: - max_width = max( - len(attr[0]) - for attr in pis - ) - except ValueError: - # Only display header if there are - # other attributes to show. 
- continue - for attr_l in pis: - attr, kval = tuple(attr_l) - label = "{0}: ".format(attr.rjust(max_width)) - res = "\n".join(item for item in kval) - if res: - wrapper = textwrap.TextWrapper( - initial_indent=label, - break_on_hyphens=False, - break_long_words=False, - subsequent_indent=(max_width + 2) \ - * " ", width=80) - msg(wrapper.fill(res)) - - if "errors" in ret_json: - _generate_error_messages(ret_json["status"], ret_json["errors"], - cmd="info") - - return ret_json["status"] def calc_widths(lines, attrs, widths=None): - """Given a set of lines and a set of attributes, calculate the minimum - width each column needs to hold its contents.""" + """Given a set of lines and a set of attributes, calculate the minimum + width each column needs to hold its contents.""" + + if not widths: + widths = [len(attr) - attr.find(".") - 1 for attr in attrs] + for l in lines: + for i, a in enumerate(l): + if len(str(a)) > widths[i]: + widths[i] = len(str(a)) + return widths - if not widths: - widths = [ len(attr) - attr.find(".") - 1 for attr in attrs ] - for l in lines: - for i, a in enumerate(l): - if len(str(a)) > widths[i]: - widths[i] = len(str(a)) - return widths def calc_justs(attrs): - """Given a set of output attributes, find any attributes with known - justification directions and assign them.""" + """Given a set of output attributes, find any attributes with known + justification directions and assign them.""" + + def __chose_just(attr): + if attr in [ + "action.name", + "action.key", + "action.raw", + "pkg.name", + "pkg.fmri", + "pkg.shortfmri", + "pkg.publisher", + ]: + return JUST_LEFT + return JUST_UNKNOWN + + return [__chose_just(attr) for attr in attrs] - def __chose_just(attr): - if attr in ["action.name", "action.key", "action.raw", - "pkg.name", "pkg.fmri", "pkg.shortfmri", "pkg.publisher"]: - return JUST_LEFT - return JUST_UNKNOWN - return [ __chose_just(attr) for attr in attrs ] def default_left(v): - """For a given justification "v", use the default of left justification - if "v" is JUST_UNKNOWN.""" + """For a given justification "v", use the default of left justification + if "v" is JUST_UNKNOWN.""" + + if v == JUST_UNKNOWN: + return JUST_LEFT + return v - if v == JUST_UNKNOWN: - return JUST_LEFT - return v def print_headers(attrs, widths, justs): - """Print out the headers for the columns in the output. + """Print out the headers for the columns in the output. + + The "attrs" parameter provides the headings that should be used. + + The "widths" parameter provides the current estimates of the width + for each column. These may be changed due to the length of the headers. + This function does modify the values contained in "widths" outside this + function. + + The "justs" parameter contains the justifications to use with each + header.""" + + headers = [] + for i, attr in enumerate(attrs): + headers.append(str(attr.upper())) + widths[i] = max(widths[i], len(attr)) + + # Now that we know all the widths, multiply them by the + # justification values to get positive or negative numbers to + # pass to the format specifier. + widths = [e[0] * default_left(e[1]) for e in zip(widths, justs)] + fmt = "" + for n in range(len(widths)): + if widths[n] < 0: + fmt += "{{{0}:<{1:d}}} ".format(n, -widths[n]) + else: + fmt += "{{{0}:>{1:d}}} ".format(n, widths[n]) - The "attrs" parameter provides the headings that should be used. + msg(fmt.format(*headers).rstrip()) - The "widths" parameter provides the current estimates of the width - for each column. 
These may be changed due to the length of the headers. - This function does modify the values contained in "widths" outside this - function. - The "justs" parameter contains the justifications to use with each - header.""" +def guess_unknown(j, v): + """If the justificaton to use for a value is unknown, assume that if + it is an integer, the output should be right justified, otherwise it + should be left justified.""" - headers = [] - for i, attr in enumerate(attrs): - headers.append(str(attr.upper())) - widths[i] = max(widths[i], len(attr)) + if j != JUST_UNKNOWN: + return j + try: + int(v) + return JUST_RIGHT + except (ValueError, TypeError): + # attribute is non-numeric or is something like + # a list. + return JUST_LEFT + + +def create_output_format(display_headers, widths, justs, line): + """Produce a format string that can be used to display results. + + The "display_headers" parameter is whether headers have been displayed + or not. If they have not, then use a simple tab system. If they + have, use the information in the other parameters to control the + formatting of the line. + + The "widths" parameter contains the width to use for each column. + The "justs" parameter contains the justifications to use for each + column. + + The "line" parameter contains the information that will be displayed + using the resulting format. It's needed so that a choice can be made + about columns with unknown justifications. + """ + + fmt = "" + if display_headers: # Now that we know all the widths, multiply them by the # justification values to get positive or negative numbers to # pass to the format specifier. - widths = [ e[0] * default_left(e[1]) for e in zip(widths, justs) ] - fmt = "" - for n in range(len(widths)): - if widths[n] < 0: - fmt += "{{{0}:<{1:d}}} ".format(n, -widths[n]) - else: - fmt += "{{{0}:>{1:d}}} ".format(n, widths[n]) + line_widths = [ + w * guess_unknown(j, a) for w, j, a in zip(widths, justs, line) + ] + for n in range(len(line_widths)): + if line_widths[n] < 0: + fmt += "{{{0}!s:<{1}}} ".format(n, -line_widths[n]) + else: + fmt += "{{{0}!s:>{1}}} ".format(n, line_widths[n]) + return fmt + for n in range(len(widths)): + fmt += "{{{0}!s}}\t".format(n) + fmt.rstrip("\t") + return fmt - msg(fmt.format(*headers).rstrip()) -def guess_unknown(j, v): - """If the justificaton to use for a value is unknown, assume that if - it is an integer, the output should be right justified, otherwise it - should be left justified.""" +def display_contents_results(actionlist, attrs, sort_attrs, display_headers): + """Print results of a "list" operation. Returns False if no output + was produced.""" - if j != JUST_UNKNOWN: - return j - try: - int(v) - return JUST_RIGHT - except (ValueError, TypeError): - # attribute is non-numeric or is something like - # a list. - return JUST_LEFT + justs = calc_justs(attrs) + lines = list(misc.list_actions_by_attrs(actionlist, attrs)) + widths = calc_widths(lines, attrs) -def create_output_format(display_headers, widths, justs, line): - """Produce a format string that can be used to display results. - - The "display_headers" parameter is whether headers have been displayed - or not. If they have not, then use a simple tab system. If they - have, use the information in the other parameters to control the - formatting of the line. - - The "widths" parameter contains the width to use for each column. - - The "justs" parameter contains the justifications to use for each - column. 
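# --- Editorial sketch (not part of the patch) --------------------------------
# How print_headers()/create_output_format() above turn a width and a
# justification into a format spec: multiplying the width by the justification
# makes it negative for left-justified columns, which selects "<" instead of
# ">".  The JUST_* values here are stand-ins for the module-level constants.
JUST_UNKNOWN, JUST_LEFT, JUST_RIGHT = 0, -1, 1

def fmt_piece(col, width, just):
    w = width * just
    if w < 0:
        return "{{{0}!s:<{1}}} ".format(col, -w)
    return "{{{0}!s:>{1}}} ".format(col, w)

fmt = fmt_piece(0, 12, JUST_LEFT) + fmt_piece(1, 6, JUST_RIGHT)
# fmt is "{0!s:<12} {1!s:>6} ": column 0 is left-justified in 12 characters,
# column 1 right-justified in 6.
print(fmt.format("pkg.name", 42).rstrip())
# ------------------------------------------------------------------------------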
- - The "line" parameter contains the information that will be displayed - using the resulting format. It's needed so that a choice can be made - about columns with unknown justifications. - """ - - fmt = "" - if display_headers: - # Now that we know all the widths, multiply them by the - # justification values to get positive or negative numbers to - # pass to the format specifier. - line_widths = [ - w * guess_unknown(j, a) - for w, j, a in zip(widths, justs, line) - ] - for n in range(len(line_widths)): - if line_widths[n] < 0: - fmt += "{{{0}!s:<{1}}} ".format(n, - -line_widths[n]) - else: - fmt += "{{{0}!s:>{1}}} ".format(n, - line_widths[n]) - return fmt - for n in range(len(widths)): - fmt += "{{{0}!s}}\t".format(n) - fmt.rstrip("\t") - return fmt + if sort_attrs: + sortidx = 0 + for i, attr in enumerate(attrs): + if attr == sort_attrs[0]: + sortidx = i + break -def display_contents_results(actionlist, attrs, sort_attrs, display_headers): - """Print results of a "list" operation. Returns False if no output - was produced.""" + # Sort numeric columns numerically. + if justs[sortidx] == JUST_RIGHT: + + def key_extract(x): + try: + return int(x[sortidx]) + except (ValueError, TypeError): + return 0 - justs = calc_justs(attrs) - lines = list(misc.list_actions_by_attrs(actionlist, attrs)) - widths = calc_widths(lines, attrs) - - if sort_attrs: - sortidx = 0 - for i, attr in enumerate(attrs): - if attr == sort_attrs[0]: - sortidx = i - break - - # Sort numeric columns numerically. - if justs[sortidx] == JUST_RIGHT: - def key_extract(x): - try: - return int(x[sortidx]) - except (ValueError, TypeError): - return 0 - else: - # typecast all the list elements to string else - # the sorted function fails if there are any - # string and list comparision. - key_extract = lambda x: str(x[sortidx]) - line_gen = sorted(lines, key=key_extract) else: - line_gen = lines + # typecast all the list elements to string else + # the sorted function fails if there are any + # string and list comparision. + key_extract = lambda x: str(x[sortidx]) + line_gen = sorted(lines, key=key_extract) + else: + line_gen = lines + + printed_output = False + for line in line_gen: + text = ( + create_output_format(display_headers, widths, justs, line).format( + *line + ) + ).rstrip() + if not text: + continue + if not printed_output and display_headers: + print_headers(attrs, widths, justs) + printed_output = True + msg(text) + return printed_output - printed_output = False - for line in line_gen: - text = (create_output_format(display_headers, widths, justs, - line).format(*line)).rstrip() - if not text: - continue - if not printed_output and display_headers: - print_headers(attrs, widths, justs) - printed_output = True - msg(text) - return printed_output def check_attrs(attrs, cmd, reference=None, prefixes=None): - """For a set of output attributes ("attrs") passed to a command ("cmd"), - if the attribute lives in a known name space, check whether it is valid. - """ - - if reference is None: - reference = valid_special_attrs - if prefixes is None: - prefixes = valid_special_prefixes - for a in attrs: - for p in prefixes: - if a.startswith(p) and a not in reference: - usage(_("Invalid attribute '{0}'").format(a), - cmd) + """For a set of output attributes ("attrs") passed to a command ("cmd"), + if the attribute lives in a known name space, check whether it is valid. + """ -def list_contents(api_inst, args): - """List package contents. - - If no arguments are given, display for all locally installed packages. 
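# --- Editorial sketch (not part of the patch) --------------------------------
# The sort-key choice made in display_contents_results() above: a column whose
# justification came out "right" (numeric) is sorted as integers, with
# non-numeric cells treated as 0; any other column is sorted on str() so mixed
# string/list cells cannot break the comparison.  Sample rows are made up.
sortidx = 1

def numeric_key(row):
    try:
        return int(row[sortidx])
    except (ValueError, TypeError):
        return 0

rows = [("usr/bin/ls", "9"), ("usr/bin/cp", "10"), ("usr/bin/mv", None)]
print(sorted(rows, key=numeric_key))           # -> mv (0), ls (9), cp (10)
print(sorted(rows, key=lambda r: str(r[0])))   # -> cp, ls, mv (alphabetical)
# ------------------------------------------------------------------------------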
- With -H omit headers and use a tab-delimited format; with -o select - attributes to display; with -s, specify attributes to sort on; with -t, - specify which action types to list.""" - - opts, pargs = getopt.getopt(args, "Ha:g:o:s:t:mfr") - - subcommand = "contents" - display_headers = True - display_raw = False - origins = set() - output_fields = False - remote = False - local = False - attrs = [] - sort_attrs = [] - action_types = [] - attr_match = {} - for opt, arg in opts: - if opt == "-H": - display_headers = False - elif opt == "-a": - try: - attr, match = arg.split("=", 1) - except ValueError: - usage(_("-a takes an argument of the form " - "="), cmd=subcommand) - attr_match.setdefault(attr, []).append(match) - elif opt == "-g": - origins.add(misc.parse_uri(arg, cwd=orig_cwd)) - elif opt == "-o": - output_fields = True - attrs.extend(arg.split(",")) - elif opt == "-s": - sort_attrs.append(arg) - elif opt == "-t": - action_types.extend(arg.split(",")) - elif opt == "-r": - remote = True - elif opt == "-m": - display_raw = True - - if origins: - remote = True - elif not remote: - local = True - - if remote and not pargs: - usage(_("contents: must request remote contents for specific " - "packages"), cmd=subcommand) - - if display_raw: - display_headers = False - attrs = ["action.raw"] - - invalid = set(("-H", "-o", "-t", "-s", "-a")). \ - intersection(set([x[0] for x in opts])) - - if len(invalid) > 0: - usage(_("-m and {0} may not be specified at the same " - "time").format(invalid.pop()), cmd=subcommand) - - if action_types: - invalid_atype = [atype - for atype in action_types - if atype not in default_attrs] - if invalid_atype == action_types: - usage(_("no valid action types specified"), - cmd=subcommand) - elif invalid_atype: - emsg(_("""\ -WARNING: invalid action types specified: {0} -""".format(",".join(invalid_atype)))) + if reference is None: + reference = valid_special_attrs + if prefixes is None: + prefixes = valid_special_prefixes + for a in attrs: + for p in prefixes: + if a.startswith(p) and a not in reference: + usage(_("Invalid attribute '{0}'").format(a), cmd) - check_attrs(attrs, subcommand) - api_inst.progresstracker.set_purpose( - api_inst.progresstracker.PURPOSE_LISTING) +def list_contents(api_inst, args): + """List package contents. + + If no arguments are given, display for all locally installed packages. 
+ With -H omit headers and use a tab-delimited format; with -o select + attributes to display; with -s, specify attributes to sort on; with -t, + specify which action types to list.""" + + opts, pargs = getopt.getopt(args, "Ha:g:o:s:t:mfr") + + subcommand = "contents" + display_headers = True + display_raw = False + origins = set() + output_fields = False + remote = False + local = False + attrs = [] + sort_attrs = [] + action_types = [] + attr_match = {} + for opt, arg in opts: + if opt == "-H": + display_headers = False + elif opt == "-a": + try: + attr, match = arg.split("=", 1) + except ValueError: + usage( + _( + "-a takes an argument of the form " + "=" + ), + cmd=subcommand, + ) + attr_match.setdefault(attr, []).append(match) + elif opt == "-g": + origins.add(misc.parse_uri(arg, cwd=orig_cwd)) + elif opt == "-o": + output_fields = True + attrs.extend(arg.split(",")) + elif opt == "-s": + sort_attrs.append(arg) + elif opt == "-t": + action_types.extend(arg.split(",")) + elif opt == "-r": + remote = True + elif opt == "-m": + display_raw = True + + if origins: + remote = True + elif not remote: + local = True + + if remote and not pargs: + usage( + _( + "contents: must request remote contents for specific " + "packages" + ), + cmd=subcommand, + ) - api_inst.log_operation_start(subcommand) - if local: - pkg_list = api.ImageInterface.LIST_INSTALLED - elif remote: - pkg_list = api.ImageInterface.LIST_NEWEST - - # - # If the user specifies no specific attrs, and no specific - # sort order, then we fill in some defaults. - # - if not attrs: - if not action_types: - # XXX Possibly have multiple exclusive attributes per - # column? If listing dependencies and files, you could - # have a path/fmri column which would list paths for - # files and fmris for dependencies. - attrs = ["path"] - else: - # Choose default attrs based on specified action - # types. A list is used here instead of a set is - # because we want to maintain the order of the - # attributes in which the users specify. - for attr in itertools.chain.from_iterable( - default_attrs.get(atype, EmptyI) - for atype in action_types): - if attr not in attrs: - attrs.append(attr) - - if not sort_attrs and not display_raw: - # XXX reverse sorting - # Most likely want to sort by path, so don't force people to - # make it explicit - if "path" in attrs: - sort_attrs = ["path"] - else: - sort_attrs = attrs[:1] + if display_raw: + display_headers = False + attrs = ["action.raw"] - # if we want a raw display (contents -m), disable the automatic - # variant filtering that normally limits working set. - if display_raw: - excludes = EmptyI - else: - excludes = api_inst.excludes + invalid = set(("-H", "-o", "-t", "-s", "-a")).intersection( + set([x[0] for x in opts]) + ) - # Now get the matching list of packages and display it. 
- processed = False - notfound = EmptyI - try: - res = api_inst.get_pkg_list(pkg_list, patterns=pargs, - raise_unmatched=True, ranked=remote, return_fmris=True, - variants=True, repos=origins) - manifests = [] - - for pfmri, summ, cats, states, pattrs in res: - manifests.append(api_inst.get_manifest(pfmri, - all_variants=display_raw, repos=origins)) - except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - return EXIT_OOPS - except api_errors.InvalidPackageErrors as e: - error(str(e), cmd=subcommand) - api_inst.log_operation_end( - result=RESULT_FAILED_UNKNOWN) - return EXIT_OOPS - except api_errors.CatalogRefreshException as e: - display_catalog_failures(e) - return EXIT_OOPS - except api_errors.InventoryException as e: - if e.illegal: - for i in e.illegal: - error(i) - api_inst.log_operation_end( - result=RESULT_FAILED_BAD_REQUEST) - return EXIT_OOPS - notfound = e.notfound - else: - if local and not manifests and not pargs: - error(_("no packages installed"), cmd=subcommand) - api_inst.log_operation_end( - result=RESULT_NOTHING_TO_DO) - return EXIT_OOPS - - # Build a generator expression based on whether specific action types - # were provided. - if action_types: - # If query is limited to specific action types, use the more - # efficient type-based generation mechanism. - gen_expr = ( - (m.fmri, a, None, None, None) - for m in manifests - for a in m.gen_actions_by_types(action_types, - attr_match=attr_match, excludes=excludes) + if len(invalid) > 0: + usage( + _("-m and {0} may not be specified at the same " "time").format( + invalid.pop() + ), + cmd=subcommand, + ) + + if action_types: + invalid_atype = [ + atype for atype in action_types if atype not in default_attrs + ] + if invalid_atype == action_types: + usage(_("no valid action types specified"), cmd=subcommand) + elif invalid_atype: + emsg( + _( + """\ +WARNING: invalid action types specified: {0} +""".format( + ",".join(invalid_atype) + ) ) + ) + + check_attrs(attrs, subcommand) + + api_inst.progresstracker.set_purpose( + api_inst.progresstracker.PURPOSE_LISTING + ) + + api_inst.log_operation_start(subcommand) + if local: + pkg_list = api.ImageInterface.LIST_INSTALLED + elif remote: + pkg_list = api.ImageInterface.LIST_NEWEST + + # + # If the user specifies no specific attrs, and no specific + # sort order, then we fill in some defaults. + # + if not attrs: + if not action_types: + # XXX Possibly have multiple exclusive attributes per + # column? If listing dependencies and files, you could + # have a path/fmri column which would list paths for + # files and fmris for dependencies. + attrs = ["path"] else: - gen_expr = ( - (m.fmri, a, None, None, None) - for m in manifests - for a in m.gen_actions(attr_match=attr_match, - excludes=excludes) + # Choose default attrs based on specified action + # types. A list is used here instead of a set is + # because we want to maintain the order of the + # attributes in which the users specify. + for attr in itertools.chain.from_iterable( + default_attrs.get(atype, EmptyI) for atype in action_types + ): + if attr not in attrs: + attrs.append(attr) + + if not sort_attrs and not display_raw: + # XXX reverse sorting + # Most likely want to sort by path, so don't force people to + # make it explicit + if "path" in attrs: + sort_attrs = ["path"] + else: + sort_attrs = attrs[:1] + + # if we want a raw display (contents -m), disable the automatic + # variant filtering that normally limits working set. 
+ if display_raw: + excludes = EmptyI + else: + excludes = api_inst.excludes + + # Now get the matching list of packages and display it. + processed = False + notfound = EmptyI + try: + res = api_inst.get_pkg_list( + pkg_list, + patterns=pargs, + raise_unmatched=True, + ranked=remote, + return_fmris=True, + variants=True, + repos=origins, + ) + manifests = [] + + for pfmri, summ, cats, states, pattrs in res: + manifests.append( + api_inst.get_manifest( + pfmri, all_variants=display_raw, repos=origins ) + ) + except api_errors.ImageFormatUpdateNeeded as e: + format_update_error(e) + return EXIT_OOPS + except api_errors.InvalidPackageErrors as e: + error(str(e), cmd=subcommand) + api_inst.log_operation_end(result=RESULT_FAILED_UNKNOWN) + return EXIT_OOPS + except api_errors.CatalogRefreshException as e: + display_catalog_failures(e) + return EXIT_OOPS + except api_errors.InventoryException as e: + if e.illegal: + for i in e.illegal: + error(i) + api_inst.log_operation_end(result=RESULT_FAILED_BAD_REQUEST) + return EXIT_OOPS + notfound = e.notfound + else: + if local and not manifests and not pargs: + error(_("no packages installed"), cmd=subcommand) + api_inst.log_operation_end(result=RESULT_NOTHING_TO_DO) + return EXIT_OOPS + + # Build a generator expression based on whether specific action types + # were provided. + if action_types: + # If query is limited to specific action types, use the more + # efficient type-based generation mechanism. + gen_expr = ( + (m.fmri, a, None, None, None) + for m in manifests + for a in m.gen_actions_by_types( + action_types, attr_match=attr_match, excludes=excludes + ) + ) + else: + gen_expr = ( + (m.fmri, a, None, None, None) + for m in manifests + for a in m.gen_actions(attr_match=attr_match, excludes=excludes) + ) - # Determine if the query returned any results by "peeking" at the first - # value returned from the generator expression. - try: - found = next(gen_expr) - except StopIteration: - found = None - actionlist = [] - - if found: - # If any matching entries were found, create a new generator - # expression using itertools.chain that includes the first - # result. - actionlist = itertools.chain([found], gen_expr) - - rval = EXIT_OK - if attr_match and manifests and not found: - rval = EXIT_OOPS - logger.error(_("""\ -pkg: contents: no matching actions found in the listed packages""")) - - if manifests and rval == EXIT_OK: - displayed_results = display_contents_results(actionlist, attrs, - sort_attrs, display_headers) - - if not displayed_results: - if output_fields: - error(gettext.ngettext("""\ + # Determine if the query returned any results by "peeking" at the first + # value returned from the generator expression. + try: + found = next(gen_expr) + except StopIteration: + found = None + actionlist = [] + + if found: + # If any matching entries were found, create a new generator + # expression using itertools.chain that includes the first + # result. + actionlist = itertools.chain([found], gen_expr) + + rval = EXIT_OK + if attr_match and manifests and not found: + rval = EXIT_OOPS + logger.error( + _( + """\ +pkg: contents: no matching actions found in the listed packages""" + ) + ) + + if manifests and rval == EXIT_OK: + displayed_results = display_contents_results( + actionlist, attrs, sort_attrs, display_headers + ) + + if not displayed_results: + if output_fields: + error( + gettext.ngettext( + """\ This package contains no actions with the fields specified using the -o option. 
Please specify other fields, or use the -m option to show the raw -package manifests.""", """\ +package manifests.""", + """\ These packages contain no actions with the fields specified using the -o option. Please specify other fields, or use the -m option to show the raw -package manifests.""", len(pargs))) - elif not actionlist: - error(gettext.ngettext("""\ +package manifests.""", + len(pargs), + ) + ) + elif not actionlist: + error( + gettext.ngettext( + """\ This package contains no actions specified using the -t option. Please specify other fields, or use the -m option to show the raw package -manifests.""", """\ +manifests.""", + """\ These package contains no actions specified using the -t option. Please specify other fields, or use the -m option to show the raw package -manifests.""", len(pargs))) - else: - error(gettext.ngettext("""\ +manifests.""", + len(pargs), + ) + ) + else: + error( + gettext.ngettext( + """\ This package delivers no filesystem content, but may contain metadata. Use the -o option to specify fields other than 'path', or use the -m option to show -the raw package manifests.""", """\ +the raw package manifests.""", + """\ These packages deliver no filesystem content, but may contain metadata. Use the -o option to specify fields other than 'path', or use the -m option to show -the raw package manifests.""", len(pargs))) - - if notfound: - rval = EXIT_OOPS - if manifests: - logger.error("") - if local: - logger.error(_("""\ +the raw package manifests.""", + len(pargs), + ) + ) + + if notfound: + rval = EXIT_OOPS + if manifests: + logger.error("") + if local: + logger.error( + _( + """\ pkg: contents: no packages matching the following patterns you specified are -installed on the system. Try specifying -r to query remotely:""")) - elif remote: - logger.error(_("""\ +installed on the system. Try specifying -r to query remotely:""" + ) + ) + elif remote: + logger.error( + _( + """\ pkg: contents: no packages matching the following patterns you specified were found in the catalog. 
Try relaxing the patterns, refreshing, and/or -examining the catalogs:""")) - logger.error("") - for p in notfound: - logger.error(" {0}".format(p)) - api_inst.log_operation_end(result=RESULT_NOTHING_TO_DO) - else: - api_inst.log_operation_end(result=RESULT_SUCCEEDED) - return rval +examining the catalogs:""" + ) + ) + logger.error("") + for p in notfound: + logger.error(" {0}".format(p)) + api_inst.log_operation_end(result=RESULT_NOTHING_TO_DO) + else: + api_inst.log_operation_end(result=RESULT_SUCCEEDED) + return rval def display_catalog_failures(cre, ignore_perms_failure=False): - total = cre.total - succeeded = cre.succeeded - partial = 0 - refresh_errstr = "" - - for pub, err in cre.failed: - if isinstance(err, api_errors.CatalogOriginRefreshException): - if len(err.failed) < err.total: - partial += 1 - - refresh_errstr += _("\n{0}/{1} repositories for " \ - "publisher '{2}' could not be reached for " \ - "catalog refresh.\n").format( - len(err.failed), err.total, pub) - for o, e in err.failed: - refresh_errstr += "\n" - refresh_errstr += str(e) - - refresh_errstr += "\n" - else: - refresh_errstr += "\n \n" + str(err) + total = cre.total + succeeded = cre.succeeded + partial = 0 + refresh_errstr = "" + + for pub, err in cre.failed: + if isinstance(err, api_errors.CatalogOriginRefreshException): + if len(err.failed) < err.total: + partial += 1 + + refresh_errstr += _( + "\n{0}/{1} repositories for " + "publisher '{2}' could not be reached for " + "catalog refresh.\n" + ).format(len(err.failed), err.total, pub) + for o, e in err.failed: + refresh_errstr += "\n" + refresh_errstr += str(e) + + refresh_errstr += "\n" + else: + refresh_errstr += "\n \n" + str(err) + partial_str = ":" + if partial: + partial_str = _(" ({0} partial):").format(str(partial)) - partial_str = ":" - if partial: - partial_str = _(" ({0} partial):").format(str(partial)) + txt = _( + "pkg: {succeeded}/{total} catalogs successfully " "updated{partial}" + ).format(succeeded=succeeded, total=total, partial=partial_str) + if cre.failed: + # This ensures that the text gets printed before the errors. + logger.error(txt) + else: + msg(txt) - txt = _("pkg: {succeeded}/{total} catalogs successfully " - "updated{partial}").format(succeeded=succeeded, total=total, - partial=partial_str) - if cre.failed: - # This ensures that the text gets printed before the errors. - logger.error(txt) - else: - msg(txt) - - for pub, err in cre.failed: - if ignore_perms_failure and \ - not isinstance(err, api_errors.PermissionsException): - # If any errors other than a permissions exception are - # found, then don't ignore them. - ignore_perms_failure = False - break + for pub, err in cre.failed: + if ignore_perms_failure and not isinstance( + err, api_errors.PermissionsException + ): + # If any errors other than a permissions exception are + # found, then don't ignore them. + ignore_perms_failure = False + break - if cre.failed and ignore_perms_failure: - # Consider those that failed to have succeeded and add them - # to the actual successful total. - return succeeded + partial + len(cre.failed) + if cre.failed and ignore_perms_failure: + # Consider those that failed to have succeeded and add them + # to the actual successful total. 
+ return succeeded + partial + len(cre.failed) - logger.error(refresh_errstr) + logger.error(refresh_errstr) - if cre.errmessage: - logger.error(cre.errmessage) + if cre.errmessage: + logger.error(cre.errmessage) - return succeeded + partial + return succeeded + partial def display_repo_failures(fail_dict): - - outstr = """ + outstr = """ WARNING: Errors were encountered when attempting to retrieve package catalog information. Packages added to the affected publisher repositories since the last retrieval may not be available. """ - for pub in fail_dict: - failed = fail_dict[pub] + for pub in fail_dict: + failed = fail_dict[pub] - if failed is None or "errors" not in failed: - # This pub did not have any repo problems, ignore. - continue + if failed is None or "errors" not in failed: + # This pub did not have any repo problems, ignore. + continue - assert type(failed) == dict - total = failed["total"] - if int(total) == 1: - repo_str = _("repository") - else: - repo_str = _("{0} of {1} repositories").format( - len(failed["errors"]), total) + assert type(failed) == dict + total = failed["total"] + if int(total) == 1: + repo_str = _("repository") + else: + repo_str = _("{0} of {1} repositories").format( + len(failed["errors"]), total + ) + + outstr += _( + "Errors were encountered when attempting to " + "contact {0} for publisher '{1}'.\n" + ).format(repo_str, pub) + for err in failed["errors"]: + outstr += "\n" + outstr += str(err) + outstr += "\n" - outstr += _("Errors were encountered when attempting to " \ - "contact {0} for publisher '{1}'.\n").format(repo_str, pub) - for err in failed["errors"]: - outstr += "\n" - outstr += str(err) - outstr += "\n" + msg(outstr) - msg(outstr) def __refresh(api_inst, pubs, full_refresh=False): - """Private helper method for refreshing publisher data.""" + """Private helper method for refreshing publisher data.""" + + try: + # The user explicitly requested this refresh, so set the + # refresh to occur immediately. + api_inst.refresh( + full_refresh=full_refresh, + ignore_unreachable=False, + immediate=True, + pubs=pubs, + ) + except api_errors.ImageFormatUpdateNeeded as e: + format_update_error(e) + return EXIT_OOPS + except api_errors.PublisherError as e: + error(e) + error(_("'pkg publisher' will show a list of publishers.")) + return EXIT_OOPS + except (api_errors.UnknownErrors, api_errors.PermissionsException) as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + error("\n" + str(e)) + return EXIT_OOPS + except api_errors.CatalogRefreshException as e: + if display_catalog_failures(e) == 0: + return EXIT_OOPS + return EXIT_PARTIAL + return EXIT_OK - try: - # The user explicitly requested this refresh, so set the - # refresh to occur immediately. - api_inst.refresh(full_refresh=full_refresh, - ignore_unreachable=False, immediate=True, pubs=pubs) - except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - return EXIT_OOPS - except api_errors.PublisherError as e: - error(e) - error(_("'pkg publisher' will show a list of publishers.")) - return EXIT_OOPS - except (api_errors.UnknownErrors, api_errors.PermissionsException) as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. 
- error("\n" + str(e)) - return EXIT_OOPS - except api_errors.CatalogRefreshException as e: - if display_catalog_failures(e) == 0: - return EXIT_OOPS - return EXIT_PARTIAL - return EXIT_OK def publisher_refresh(api_inst, args): - """Update metadata for the image's publishers.""" - - # XXX will need to show available content series for each package - full_refresh = False - opts, pargs = getopt.getopt(args, "q", ["full"]) - for opt, arg in opts: - if opt == "-q": - global_settings.client_output_quiet = True - if opt == "--full": - full_refresh = True - # Reset the progress tracker here, because we may have - # to switch to a different tracker due to the options parse. - _api_inst.progresstracker = get_tracker() - # suppress phase information since we're doing just one thing. - api_inst.progresstracker.set_major_phase( - api_inst.progresstracker.PHASE_UTILITY) - return __refresh(api_inst, pargs, full_refresh=full_refresh) + """Update metadata for the image's publishers.""" + + # XXX will need to show available content series for each package + full_refresh = False + opts, pargs = getopt.getopt(args, "q", ["full"]) + for opt, arg in opts: + if opt == "-q": + global_settings.client_output_quiet = True + if opt == "--full": + full_refresh = True + # Reset the progress tracker here, because we may have + # to switch to a different tracker due to the options parse. + _api_inst.progresstracker = get_tracker() + # suppress phase information since we're doing just one thing. + api_inst.progresstracker.set_major_phase( + api_inst.progresstracker.PHASE_UTILITY + ) + return __refresh(api_inst, pargs, full_refresh=full_refresh) + def copy_publishers_from(api_inst, args): - """pkg copy-publishers-from """ - opts, pargs = getopt.getopt(args, "") - if len(pargs) != 1: - usage(_("directory to copy from must be specified"), - cmd="copy-publishers-from") - src_api_inst = __api_alloc(pargs[0], True, False) - if not src_api_inst: - return EXIT_OOPS - for n, pub in enumerate(src_api_inst.get_publishers()): - search_first = True if n == 0 else False - if api_inst.has_publisher(prefix=pub.prefix, alias=pub.prefix): - api_inst.remove_publisher(prefix=pub.prefix, - alias=pub.prefix) - pub.reset_client_uuid() - api_inst.add_publisher(pub, refresh_allowed=False, - search_first=search_first) - api_inst.refresh() - return EXIT_OK + """pkg copy-publishers-from """ + opts, pargs = getopt.getopt(args, "") + if len(pargs) != 1: + usage( + _("directory to copy from must be specified"), + cmd="copy-publishers-from", + ) + src_api_inst = __api_alloc(pargs[0], True, False) + if not src_api_inst: + return EXIT_OOPS + for n, pub in enumerate(src_api_inst.get_publishers()): + search_first = True if n == 0 else False + if api_inst.has_publisher(prefix=pub.prefix, alias=pub.prefix): + api_inst.remove_publisher(prefix=pub.prefix, alias=pub.prefix) + pub.reset_client_uuid() + api_inst.add_publisher( + pub, refresh_allowed=False, search_first=search_first + ) + api_inst.refresh() + return EXIT_OK + def list_uuids(api_inst, args): - """pkg list-uuids""" - for n, pub in enumerate(api_inst.get_publishers()): - msg("{0:18} {1} {2}".format(pub.prefix, - pub.client_uuid, pub.client_uuid_time)); - return EXIT_OK + """pkg list-uuids""" + for n, pub in enumerate(api_inst.get_publishers()): + msg( + "{0:18} {1} {2}".format( + pub.prefix, pub.client_uuid, pub.client_uuid_time + ) + ) + return EXIT_OK + def _get_ssl_cert_key(root, is_zone, ssl_cert, ssl_key): - if ssl_cert is not None or ssl_key is not None: - # In the case of zones, the ssl cert 
given is assumed to - # be relative to the root of the image, not truly absolute. - if is_zone: - if ssl_cert is not None: - ssl_cert = os.path.abspath( - root + os.sep + ssl_cert) - if ssl_key is not None: - ssl_key = os.path.abspath( - root + os.sep + ssl_key) - elif orig_cwd: - if ssl_cert and not os.path.isabs(ssl_cert): - ssl_cert = os.path.normpath(os.path.join( - orig_cwd, ssl_cert)) - if ssl_key and not os.path.isabs(ssl_key): - ssl_key = os.path.normpath(os.path.join( - orig_cwd, ssl_key)) - return ssl_cert, ssl_key + if ssl_cert is not None or ssl_key is not None: + # In the case of zones, the ssl cert given is assumed to + # be relative to the root of the image, not truly absolute. + if is_zone: + if ssl_cert is not None: + ssl_cert = os.path.abspath(root + os.sep + ssl_cert) + if ssl_key is not None: + ssl_key = os.path.abspath(root + os.sep + ssl_key) + elif orig_cwd: + if ssl_cert and not os.path.isabs(ssl_cert): + ssl_cert = os.path.normpath(os.path.join(orig_cwd, ssl_cert)) + if ssl_key and not os.path.isabs(ssl_key): + ssl_key = os.path.normpath(os.path.join(orig_cwd, ssl_key)) + return ssl_cert, ssl_key + def _set_pub_error_wrap(func, pfx, raise_errors, *args, **kwargs): - """Helper function to wrap set-publisher private methods. Returns - a tuple of (return value, message). Callers should check the return - value for errors.""" + """Helper function to wrap set-publisher private methods. Returns + a tuple of (return value, message). Callers should check the return + value for errors.""" + + try: + return func(*args, **kwargs) + except api_errors.CatalogRefreshException as e: + for entry in raise_errors: + if isinstance(e, entry): + raise + txt = _("Could not refresh the catalog for {0}\n").format(pfx) + for pub, err in e.failed: + txt += " \n{0}".format(err) + return EXIT_OOPS, txt + except api_errors.InvalidDepotResponseException as e: + for entry in raise_errors: + if isinstance(e, entry): + raise + if pfx: + return EXIT_OOPS, _( + "The origin URIs for '{pubname}' " + "do not appear to point to a valid pkg repository." + "\nPlease verify the repository's location and the " + "client's network configuration." + "\nAdditional details:\n\n{details}" + ).format(pubname=pfx, details=str(e)) + return EXIT_OOPS, _( + "The specified URI does not appear to " + "point to a valid pkg repository.\nPlease check the URI " + "and the client's network configuration." + "\nAdditional details:\n\n{0}" + ).format(str(e)) + except api_errors.ImageFormatUpdateNeeded as e: + for entry in raise_errors: + if isinstance(e, entry): + raise + format_update_error(e) + return EXIT_OOPS, "" + except api_errors.ApiException as e: + for entry in raise_errors: + if isinstance(e, entry): + raise + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. 
+ return EXIT_OOPS, ("\n" + str(e)) + + +def publisher_set( + op, + api_inst, + pargs, + ssl_key, + ssl_cert, + origin_uri, + reset_uuid, + add_mirrors, + remove_mirrors, + add_origins, + remove_origins, + enable_origins, + disable_origins, + refresh_allowed, + disable, + sticky, + search_before, + search_after, + search_first, + approved_ca_certs, + revoked_ca_certs, + unset_ca_certs, + set_props, + add_prop_values, + remove_prop_values, + unset_props, + repo_uri, + proxy_uri, + li_erecurse, + verbose, +): + """pkg set-publisher [-Pedv] [-k ssl_key] [-c ssl_cert] [--reset-uuid] + [-O|--origin_uri origin to set] + [-g|--add-origin origin to add] [-G|--remove-origin origin to + remove] [-m|--add-mirror mirror to add] [-M|--remove-mirror mirror + to remove] [-p repo_uri] [--enable] [--disable] [--no-refresh] + [-R | -r [-z zonename... | -Z zonename...]] + [--sticky] [--non-sticky ] [--search-before=publisher] + [--search-after=publisher] + [--approve-ca-cert path to CA] + [--revoke-ca-cert hash of CA to remove] + [--unset-ca-cert hash of CA to unset] + [--set-property name of property=value] + [--add-property-value name of property=value to add] + [--remove-property-value name of property=value to remove] + [--unset-property name of property to delete] + [--proxy proxy to use] + [publisher]""" + + if origin_uri: + origin_uri = misc.parse_uri(origin_uri, cwd=orig_cwd) + + out_json = client_api._publisher_set( + op, + api_inst, + pargs, + ssl_key, + ssl_cert, + origin_uri, + reset_uuid, + add_mirrors, + remove_mirrors, + add_origins, + remove_origins, + enable_origins, + disable_origins, + refresh_allowed, + disable, + sticky, + search_before, + search_after, + search_first, + approved_ca_certs, + revoked_ca_certs, + unset_ca_certs, + set_props, + add_prop_values, + remove_prop_values, + unset_props, + repo_uri, + proxy_uri, + ) + + errors = None + if "errors" in out_json: + errors = out_json["errors"] + errors = _generate_error_messages( + out_json["status"], errors, selected_type=["publisher_set"] + ) + elif li_erecurse is not None: + for li_name, li_rel, li_path in api_inst.list_linked(): + if li_rel != "child": + continue + if li_name not in li_erecurse: + continue + if verbose: + print("Updating publisher on", li_name) + li_api = __api_alloc(li_path, True, False) + if not li_api: + continue + li_json = client_api._publisher_set( + op, + li_api, + pargs, + ssl_key, + ssl_cert, + origin_uri, + reset_uuid, + add_mirrors, + remove_mirrors, + add_origins, + remove_origins, + enable_origins, + disable_origins, + refresh_allowed, + disable, + sticky, + search_before, + search_after, + search_first, + approved_ca_certs, + revoked_ca_certs, + unset_ca_certs, + set_props, + add_prop_values, + remove_prop_values, + unset_props, + repo_uri, + proxy_uri, + ) + if "errors" in li_json: + if not errors: + errors = [] + errors.append( + _generate_error_messages( + li_json["status"], + li_json["errors"], + selected_type=["publisher_set"], + ) + ) - try: - return func(*args, **kwargs) - except api_errors.CatalogRefreshException as e: - for entry in raise_errors: - if isinstance(e, entry): - raise - txt = _("Could not refresh the catalog for {0}\n").format( - pfx) - for pub, err in e.failed: - txt += " \n{0}".format(err) - return EXIT_OOPS, txt - except api_errors.InvalidDepotResponseException as e: - for entry in raise_errors: - if isinstance(e, entry): - raise - if pfx: - return EXIT_OOPS, _("The origin URIs for '{pubname}' " - "do not appear to point to a valid pkg repository." 
- "\nPlease verify the repository's location and the " - "client's network configuration." - "\nAdditional details:\n\n{details}").format( - pubname=pfx, details=str(e)) - return EXIT_OOPS, _("The specified URI does not appear to " - "point to a valid pkg repository.\nPlease check the URI " - "and the client's network configuration." - "\nAdditional details:\n\n{0}").format(str(e)) - except api_errors.ImageFormatUpdateNeeded as e: - for entry in raise_errors: - if isinstance(e, entry): - raise - format_update_error(e) - return EXIT_OOPS, "" - except api_errors.ApiException as e: - for entry in raise_errors: - if isinstance(e, entry): - raise - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - return EXIT_OOPS, ("\n" + str(e)) - -def publisher_set(op, api_inst, pargs, ssl_key, ssl_cert, origin_uri, - reset_uuid, add_mirrors, remove_mirrors, add_origins, remove_origins, - enable_origins, disable_origins, refresh_allowed, disable, sticky, - search_before, search_after, search_first, approved_ca_certs, - revoked_ca_certs, unset_ca_certs, set_props, add_prop_values, - remove_prop_values, unset_props, repo_uri, proxy_uri, - li_erecurse, verbose): - """pkg set-publisher [-Pedv] [-k ssl_key] [-c ssl_cert] [--reset-uuid] - [-O|--origin_uri origin to set] - [-g|--add-origin origin to add] [-G|--remove-origin origin to - remove] [-m|--add-mirror mirror to add] [-M|--remove-mirror mirror - to remove] [-p repo_uri] [--enable] [--disable] [--no-refresh] - [-R | -r [-z zonename... | -Z zonename...]] - [--sticky] [--non-sticky ] [--search-before=publisher] - [--search-after=publisher] - [--approve-ca-cert path to CA] - [--revoke-ca-cert hash of CA to remove] - [--unset-ca-cert hash of CA to unset] - [--set-property name of property=value] - [--add-property-value name of property=value to add] - [--remove-property-value name of property=value to remove] - [--unset-property name of property to delete] - [--proxy proxy to use] - [publisher] """ - - if origin_uri: - origin_uri = misc.parse_uri(origin_uri, cwd=orig_cwd) - - out_json = client_api._publisher_set(op, api_inst, pargs, ssl_key, - ssl_cert, origin_uri, reset_uuid, add_mirrors, remove_mirrors, - add_origins, remove_origins, enable_origins, disable_origins, - refresh_allowed, disable, sticky, search_before, search_after, - search_first, approved_ca_certs, revoked_ca_certs, unset_ca_certs, - set_props, add_prop_values, remove_prop_values, unset_props, - repo_uri, proxy_uri) - - errors = None - if "errors" in out_json: - errors = out_json["errors"] - errors = _generate_error_messages(out_json["status"], errors, - selected_type=["publisher_set"]) - elif li_erecurse is not None: - for li_name, li_rel, li_path in api_inst.list_linked(): - if li_rel != "child": continue - if li_name not in li_erecurse: continue - if verbose: - print("Updating publisher on", li_name) - li_api = __api_alloc(li_path, True, False) - if not li_api: continue - li_json = client_api._publisher_set(op, li_api, - pargs, ssl_key, ssl_cert, origin_uri, reset_uuid, - add_mirrors, remove_mirrors, add_origins, - remove_origins, enable_origins, disable_origins, - refresh_allowed, disable, sticky, - search_before, search_after, search_first, - approved_ca_certs, revoked_ca_certs, unset_ca_certs, - set_props, add_prop_values, remove_prop_values, - unset_props, repo_uri, proxy_uri) - if "errors" in li_json: - if not errors: errors = [] - errors.append(_generate_error_messages( - li_json["status"], li_json["errors"], - 
selected_type=["publisher_set"])) - - if "data" in out_json: - if "header" in out_json["data"]: - logger.info(out_json["data"]["header"]) - if "added" in out_json["data"]: - logger.info(_(" Added publisher(s): {0}").format( - ", ".join(out_json["data"]["added"]))) - if "updated" in out_json["data"]: - logger.info(_(" Updated publisher(s): {0}").format( - ", ".join(out_json["data"]["updated"]))) - - if errors: - _generate_error_messages(out_json["status"], errors, - cmd="set-publisher", add_info={"repo_uri": repo_uri}) + if "data" in out_json: + if "header" in out_json["data"]: + logger.info(out_json["data"]["header"]) + if "added" in out_json["data"]: + logger.info( + _(" Added publisher(s): {0}").format( + ", ".join(out_json["data"]["added"]) + ) + ) + if "updated" in out_json["data"]: + logger.info( + _(" Updated publisher(s): {0}").format( + ", ".join(out_json["data"]["updated"]) + ) + ) + + if errors: + _generate_error_messages( + out_json["status"], + errors, + cmd="set-publisher", + add_info={"repo_uri": repo_uri}, + ) + + return out_json["status"] - return out_json["status"] def publisher_unset(api_inst, pargs): - """pkg unset-publisher publisher ...""" + """pkg unset-publisher publisher ...""" - opts, pargs = getopt.getopt(pargs, "") - out_json = client_api._publisher_unset("unset-publisher", api_inst, - pargs) + opts, pargs = getopt.getopt(pargs, "") + out_json = client_api._publisher_unset("unset-publisher", api_inst, pargs) - if "errors" in out_json: - _generate_error_messages(out_json["status"], - out_json["errors"], cmd="unset-publisher") + if "errors" in out_json: + _generate_error_messages( + out_json["status"], out_json["errors"], cmd="unset-publisher" + ) - return out_json["status"] + return out_json["status"] -def publisher_list(op, api_inst, pargs, omit_headers, preferred_only, - inc_disabled, output_format): - """pkg publishers.""" - ret_json = client_api._publisher_list(op, api_inst, pargs, omit_headers, - preferred_only, inc_disabled, output_format) - retcode = ret_json["status"] +def publisher_list( + op, + api_inst, + pargs, + omit_headers, + preferred_only, + inc_disabled, + output_format, +): + """pkg publishers.""" + + ret_json = client_api._publisher_list( + op, + api_inst, + pargs, + omit_headers, + preferred_only, + inc_disabled, + output_format, + ) + retcode = ret_json["status"] + + if len(pargs) == 0: + # Create a formatting string for the default output + # format. + if output_format == "default": + fmt = "{0:14} {1:12} {2:8} {3:2} {4} {5}" + + # Create a formatting string for the tsv output + # format. + if output_format == "tsv": + fmt = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}" + + # Output an header if desired. + if not omit_headers: + msg(fmt.format(*ret_json["data"]["headers"])) + + for p in ret_json["data"]["publishers"]: + msg(fmt.format(*p)) + else: + + def display_signing_certs(p): + if "Approved CAs" in p: + msg(_(" Approved CAs:"), p["Approved CAs"][0]) + for h in p["Approved CAs"][1:]: + msg(_(" :"), h) + if "Revoked CAs" in p: + msg(_(" Revoked CAs:"), p["Revoked CAs"][0]) + for h in p["Revoked CAs"][1:]: + msg(_(" :"), h) + + def display_ssl_info(uri_data): + msg(_(" SSL Key:"), uri_data["SSL Key"]) + msg(_(" SSL Cert:"), uri_data["SSL Cert"]) + + if "errors" in ret_json: + for e in ret_json["errors"]: + if "errtype" in e and e["errtype"] == "cert_info": + emsg(e["reason"]) + + if "Cert. Effective Date" in uri_data: + msg( + _(" Cert. Effective Date:"), + uri_data["Cert. Effective Date"], + ) + msg( + _("Cert. 
Expiration Date:"), + uri_data["Cert. Expiration Date"], + ) - if len(pargs) == 0: - # Create a formatting string for the default output - # format. - if output_format == "default": - fmt = "{0:14} {1:12} {2:8} {3:2} {4} {5}" + if ( + "data" not in ret_json + or "publisher_details" not in ret_json["data"] + ): + return retcode + + for pub in ret_json["data"]["publisher_details"]: + msg("") + msg(_(" Publisher:"), pub["Publisher"]) + msg(_(" Alias:"), pub["Alias"]) + + if "origins" in pub: + for od in pub["origins"]: + msg(_(" Origin URI:"), od["Origin URI"]) + msg(_(" Origin Status:"), od["Status"]) + if "Proxy" in od: + msg(_(" Proxy:"), ", ".join(od["Proxy"])) + display_ssl_info(od) + + if "mirrors" in pub: + for md in pub["mirrors"]: + msg(_(" Mirror URI:"), md["Mirror URI"]) + msg(_(" Mirror Status:"), md["Status"]) + if "Proxy" in md: + msg(_(" Proxy:"), ", ".join(md["Proxy"])) + display_ssl_info(md) + + msg(_(" Client UUID:"), pub["Client UUID"]) + msg(_(" Catalog Updated:"), pub["Catalog Updated"]) + display_signing_certs(pub) + msg(_(" Publisher enabled:"), _(pub["enabled"])) + + if "Properties" not in pub: + continue + pub_items = sorted(six.iteritems(pub["Properties"])) + property_padding = " " + properties_displayed = False + for k, v in pub_items: + if not v: + continue + if not properties_displayed: + msg(_(" Properties:")) + properties_displayed = True + if not isinstance(v, six.string_types): + v = ", ".join(sorted(v)) + msg(property_padding, k + " =", str(v)) + return retcode - # Create a formatting string for the tsv output - # format. - if output_format == "tsv": - fmt = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}" - # Output an header if desired. - if not omit_headers: - msg(fmt.format(*ret_json["data"]["headers"])) +def property_add_value(api_inst, args): + """pkg add-property-value propname propvalue""" + + # ensure no options are passed in + subcommand = "add-property-value" + opts, pargs = getopt.getopt(args, "") + try: + propname, propvalue = pargs + except ValueError: + usage(_("requires a property name and value"), cmd=subcommand) + + # XXX image property management should be in pkg.client.api + try: + img.add_property_value(propname, propvalue) + except api_errors.ImageFormatUpdateNeeded as e: + format_update_error(e) + return EXIT_OOPS + except api_errors.ApiException as e: + error(str(e), cmd=subcommand) + return EXIT_OOPS + return EXIT_OK - for p in ret_json["data"]["publishers"]: - msg(fmt.format(*p)) - else: - def display_signing_certs(p): - if "Approved CAs" in p: - msg(_(" Approved CAs:"), - p["Approved CAs"][0]) - for h in p["Approved CAs"][1:]: - msg(_(" :"), h) - if "Revoked CAs" in p: - msg(_(" Revoked CAs:"), - p["Revoked CAs"][0]) - for h in p["Revoked CAs"][1:]: - msg(_(" :"), h) - - def display_ssl_info(uri_data): - msg(_(" SSL Key:"), uri_data["SSL Key"]) - msg(_(" SSL Cert:"), uri_data["SSL Cert"]) - - if "errors" in ret_json: - for e in ret_json["errors"]: - if "errtype" in e and \ - e["errtype"] == "cert_info": - emsg(e["reason"]) - - if "Cert. Effective Date" in uri_data: - msg(_(" Cert. Effective Date:"), - uri_data["Cert. Effective Date"]) - msg(_("Cert. Expiration Date:"), - uri_data["Cert. 
Expiration Date"]) - - if "data" not in ret_json or "publisher_details" not in \ - ret_json["data"]: - return retcode - - for pub in ret_json["data"]["publisher_details"]: - msg("") - msg(_(" Publisher:"), pub["Publisher"]) - msg(_(" Alias:"), pub["Alias"]) - - if "origins" in pub: - for od in pub["origins"]: - msg(_(" Origin URI:"), - od["Origin URI"]) - msg(_(" Origin Status:"), - od["Status"]) - if "Proxy" in od: - msg(_(" Proxy:"), - ", ".join(od["Proxy"])) - display_ssl_info(od) - - if "mirrors" in pub: - for md in pub["mirrors"]: - msg(_(" Mirror URI:"), - md["Mirror URI"]) - msg(_(" Mirror Status:"), - md["Status"]) - if "Proxy" in md: - msg(_(" Proxy:"), - ", ".join(md["Proxy"])) - display_ssl_info(md) - - msg(_(" Client UUID:"), - pub["Client UUID"]) - msg(_(" Catalog Updated:"), - pub["Catalog Updated"]) - display_signing_certs(pub) - msg(_(" Publisher enabled:"), - _(pub["enabled"])) - - if "Properties" not in pub: - continue - pub_items = sorted( - six.iteritems(pub["Properties"])) - property_padding = " " - properties_displayed = False - for k, v in pub_items: - if not v: - continue - if not properties_displayed: - msg(_(" Properties:")) - properties_displayed = True - if not isinstance(v, six.string_types): - v = ", ".join(sorted(v)) - msg(property_padding, k + " =", str(v)) - return retcode -def property_add_value(api_inst, args): - """pkg add-property-value propname propvalue""" +def property_remove_value(api_inst, args): + """pkg remove-property-value propname propvalue""" + + # ensure no options are passed in + subcommand = "remove-property-value" + opts, pargs = getopt.getopt(args, "") + try: + propname, propvalue = pargs + except ValueError: + usage(_("requires a property name and value"), cmd=subcommand) + + # XXX image property management should be in pkg.client.api + try: + img.remove_property_value(propname, propvalue) + except api_errors.ImageFormatUpdateNeeded as e: + format_update_error(e) + return EXIT_OOPS + except api_errors.ApiException as e: + error(str(e), cmd=subcommand) + return EXIT_OOPS + return EXIT_OK - # ensure no options are passed in - subcommand = "add-property-value" - opts, pargs = getopt.getopt(args, "") - try: - propname, propvalue = pargs - except ValueError: - usage(_("requires a property name and value"), cmd=subcommand) - # XXX image property management should be in pkg.client.api - try: - img.add_property_value(propname, propvalue) - except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - return EXIT_OOPS - except api_errors.ApiException as e: - error(str(e), cmd=subcommand) - return EXIT_OOPS - return EXIT_OK +def property_set(api_inst, args): + """pkg set-property propname propvalue [propvalue ...]""" + + # ensure no options are passed in + subcommand = "set-property" + opts, pargs = getopt.getopt(args, "") + try: + propname = pargs[0] + propvalues = pargs[1:] + except IndexError: + propvalues = [] + if len(propvalues) == 0: + usage( + _("requires a property name and at least one value"), cmd=subcommand + ) + elif ( + propname + not in ( + "publisher-search-order", + "signature-policy", + "signature-required-names", + ) + and len(propvalues) == 1 + ): + # All other properties are single value, so if only one (or no) + # value was specified, transform it. If multiple values were + # specified, allow the value to be passed on so that the + # configuration classes can re-raise the appropriate error. 
+ propvalues = propvalues[0] + + props = {propname: propvalues} + if propname == "signature-policy": + policy = propvalues[0] + props[propname] = policy + params = propvalues[1:] + if policy != "require-names" and len(params): + usage( + _( + "Signature-policy {0} doesn't allow additional " + "parameters." + ).format(policy), + cmd=subcommand, + ) + elif policy == "require-names": + props["signature-required-names"] = params + + # XXX image property management should be in pkg.client.api + try: + img.set_properties(props) + except api_errors.ImageFormatUpdateNeeded as e: + format_update_error(e) + return EXIT_OOPS + except api_errors.ApiException as e: + error(str(e), cmd=subcommand) + return EXIT_OOPS + return EXIT_OK -def property_remove_value(api_inst, args): - """pkg remove-property-value propname propvalue""" - # ensure no options are passed in - subcommand = "remove-property-value" - opts, pargs = getopt.getopt(args, "") - try: - propname, propvalue = pargs - except ValueError: - usage(_("requires a property name and value"), cmd=subcommand) +def property_unset(api_inst, args): + """pkg unset-property propname ...""" - # XXX image property management should be in pkg.client.api - try: - img.remove_property_value(propname, propvalue) - except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - return EXIT_OOPS - except api_errors.ApiException as e: - error(str(e), cmd=subcommand) - return EXIT_OOPS - return EXIT_OK + # is this an existing property in our image? + # if so, delete it + # if not, error -def property_set(api_inst, args): - """pkg set-property propname propvalue [propvalue ...]""" + # ensure no options are passed in + subcommand = "unset-property" + opts, pargs = getopt.getopt(args, "") + if not pargs: + usage(_("requires at least one property name"), cmd=subcommand) - # ensure no options are passed in - subcommand = "set-property" - opts, pargs = getopt.getopt(args, "") + # XXX image property management should be in pkg.client.api + for p in pargs: try: - propname = pargs[0] - propvalues = pargs[1:] - except IndexError: - propvalues = [] - if len(propvalues) == 0: - usage(_("requires a property name and at least one value"), - cmd=subcommand) - elif propname not in ("publisher-search-order", - "signature-policy", "signature-required-names") and \ - len(propvalues) == 1: - # All other properties are single value, so if only one (or no) - # value was specified, transform it. If multiple values were - # specified, allow the value to be passed on so that the - # configuration classes can re-raise the appropriate error. 
- propvalues = propvalues[0] - - props = { propname: propvalues } - if propname == "signature-policy": - policy = propvalues[0] - props[propname] = policy - params = propvalues[1:] - if policy != "require-names" and len(params): - usage(_("Signature-policy {0} doesn't allow additional " - "parameters.").format(policy), cmd=subcommand) - elif policy == "require-names": - props["signature-required-names"] = params - - # XXX image property management should be in pkg.client.api - try: - img.set_properties(props) + img.delete_property(p) except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - return EXIT_OOPS + format_update_error(e) + return EXIT_OOPS except api_errors.ApiException as e: - error(str(e), cmd=subcommand) - return EXIT_OOPS - return EXIT_OK + error(str(e), cmd=subcommand) + return EXIT_OOPS -def property_unset(api_inst, args): - """pkg unset-property propname ...""" + return EXIT_OK - # is this an existing property in our image? - # if so, delete it - # if not, error - # ensure no options are passed in - subcommand = "unset-property" - opts, pargs = getopt.getopt(args, "") - if not pargs: - usage(_("requires at least one property name"), - cmd=subcommand) +def property_list(api_inst, args): + """pkg property [-H] [propname ...]""" + omit_headers = False - # XXX image property management should be in pkg.client.api - for p in pargs: - try: - img.delete_property(p) - except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - return EXIT_OOPS - except api_errors.ApiException as e: - error(str(e), cmd=subcommand) - return EXIT_OOPS + subcommand = "property" + opts, pargs = getopt.getopt(args, "H") + for opt, arg in opts: + if opt == "-H": + omit_headers = True - return EXIT_OK + # XXX image property management should be in pkg.client.api + for p in pargs: + if not img.has_property(p): + error(_("no such property: {0}").format(p), cmd=subcommand) + return EXIT_OOPS -def property_list(api_inst, args): - """pkg property [-H] [propname ...]""" - omit_headers = False - - subcommand = "property" - opts, pargs = getopt.getopt(args, "H") - for opt, arg in opts: - if opt == "-H": - omit_headers = True - - # XXX image property management should be in pkg.client.api - for p in pargs: - if not img.has_property(p): - error(_("no such property: {0}").format(p), - cmd=subcommand) - return EXIT_OOPS - - if not pargs: - # If specific properties were named, list them in the order - # requested; otherwise, list them sorted. - pargs = sorted(list(img.properties())) - - width = max(max([len(p) for p in pargs]), 8) - fmt = "{{0:{0}}} {{1}}".format(width) - if not omit_headers: - msg(fmt.format("PROPERTY", "VALUE")) + if not pargs: + # If specific properties were named, list them in the order + # requested; otherwise, list them sorted. + pargs = sorted(list(img.properties())) - for p in pargs: - msg(fmt.format(p, img.get_property(p))) + width = max(max([len(p) for p in pargs]), 8) + fmt = "{{0:{0}}} {{1}}".format(width) + if not omit_headers: + msg(fmt.format("PROPERTY", "VALUE")) - return EXIT_OK + for p in pargs: + msg(fmt.format(p, img.get_property(p))) + + return EXIT_OK -def list_variant(op, api_inst, pargs, omit_headers, output_format, - list_all_items, list_installed, verbose): - """pkg variant [-Haiv] [-F format] [ ...]""" - - subcommand = "variant" - if output_format is None: - output_format = "default" - - # To work around Python 2.x's scoping limits, a list is used. 
- found = [False] - req_variants = set(pargs) - - # If user explicitly provides variants, display implicit value even if - # not explicitly set in the image or found in a package. - implicit = req_variants and True or False - - def gen_current(): - for (name, val, pvals) in api_inst.gen_variants(variant_list, - implicit=implicit, patterns=req_variants): - if output_format == "default": - name_list = name.split(".")[1:] - name = ".".join(name_list) - found[0] = True - yield { - "variant": name, - "value": val - } - - def gen_possible(): - for (name, val, pvals) in api_inst.gen_variants(variant_list, - implicit=implicit, patterns=req_variants): - if output_format == "default": - name_list = name.split(".")[1:] - name = ".".join(name_list) - found[0] = True - for pval in pvals: - yield { - "variant": name, - "value": pval - } +def list_variant( + op, + api_inst, + pargs, + omit_headers, + output_format, + list_all_items, + list_installed, + verbose, +): + """pkg variant [-Haiv] [-F format] [ ...]""" + + subcommand = "variant" + if output_format is None: + output_format = "default" + + # To work around Python 2.x's scoping limits, a list is used. + found = [False] + req_variants = set(pargs) + + # If user explicitly provides variants, display implicit value even if + # not explicitly set in the image or found in a package. + implicit = req_variants and True or False + + def gen_current(): + for name, val, pvals in api_inst.gen_variants( + variant_list, implicit=implicit, patterns=req_variants + ): + if output_format == "default": + name_list = name.split(".")[1:] + name = ".".join(name_list) + found[0] = True + yield {"variant": name, "value": val} + + def gen_possible(): + for name, val, pvals in api_inst.gen_variants( + variant_list, implicit=implicit, patterns=req_variants + ): + if output_format == "default": + name_list = name.split(".")[1:] + name = ".".join(name_list) + found[0] = True + for pval in pvals: + yield {"variant": name, "value": pval} + + if verbose: + gen_listing = gen_possible + else: + gen_listing = gen_current + + if list_all_items: if verbose: - gen_listing = gen_possible + variant_list = api_inst.VARIANT_ALL_POSSIBLE else: - gen_listing = gen_current - - if list_all_items: - if verbose: - variant_list = api_inst.VARIANT_ALL_POSSIBLE - else: - variant_list = api_inst.VARIANT_ALL - elif list_installed: - if verbose: - variant_list = api_inst.VARIANT_INSTALLED_POSSIBLE - else: - variant_list = api_inst.VARIANT_INSTALLED + variant_list = api_inst.VARIANT_ALL + elif list_installed: + if verbose: + variant_list = api_inst.VARIANT_INSTALLED_POSSIBLE else: - if verbose: - variant_list = api_inst.VARIANT_IMAGE_POSSIBLE - else: - variant_list = api_inst.VARIANT_IMAGE - - # VARIANT VALUE - # - # - # ... - field_data = { - "variant" : [("default", "json", "tsv"), _("VARIANT"), ""], - "value" : [("default", "json", "tsv"), _("VALUE"), ""], - } - desired_field_order = (_("VARIANT"), _("VALUE")) - - # Default output formatting. - def_fmt = "{0:70} {1}" + variant_list = api_inst.VARIANT_INSTALLED + else: + if verbose: + variant_list = api_inst.VARIANT_IMAGE_POSSIBLE + else: + variant_list = api_inst.VARIANT_IMAGE + + # VARIANT VALUE + # + # + # ... + field_data = { + "variant": [("default", "json", "tsv"), _("VARIANT"), ""], + "value": [("default", "json", "tsv"), _("VALUE"), ""], + } + desired_field_order = (_("VARIANT"), _("VALUE")) + + # Default output formatting. + def_fmt = "{0:70} {1}" + + # print without trailing newline. 
+ sys.stdout.write( + misc.get_listing( + desired_field_order, + field_data, + gen_listing(), + output_format, + def_fmt, + omit_headers, + ) + ) - # print without trailing newline. - sys.stdout.write(misc.get_listing(desired_field_order, - field_data, gen_listing(), output_format, def_fmt, - omit_headers)) + if not found[0] and req_variants: + if output_format == "default": + # Don't pollute other output formats. + error(_("no matching variants found"), cmd=subcommand) + return EXIT_OOPS - if not found[0] and req_variants: - if output_format == "default": - # Don't pollute other output formats. - error(_("no matching variants found"), - cmd=subcommand) - return EXIT_OOPS + # Successful if no variants exist or if at least one matched. + return EXIT_OK - # Successful if no variants exist or if at least one matched. - return EXIT_OK -def list_facet(op, api_inst, pargs, omit_headers, output_format, list_all_items, - list_masked, list_installed): - """pkg facet [-Hai] [-F format] [ ...]""" - - subcommand = "facet" - if output_format is None: - output_format = "default" - - # To work around Python 2.x's scoping limits, a list is used. - found = [False] - req_facets = set(pargs) - - facet_list = api_inst.FACET_IMAGE - if list_all_items: - facet_list = api_inst.FACET_ALL - elif list_installed: - facet_list = api_inst.FACET_INSTALLED - - # If user explicitly provides facets, display implicit value even if - # not explicitly set in the image or found in a package. - implicit = req_facets and True or False - - def gen_listing(): - for (name, val, src, masked) in \ - api_inst.gen_facets(facet_list, implicit=implicit, - patterns=req_facets): - if output_format == "default": - name_list = name.split(".")[1:] - name = ".".join(name_list) - found[0] = True - - if not list_masked and masked: - continue - - # "value" and "masked" are intentionally not _(). - yield { - "facet": name, - "value": val and "True" or "False", - "src": src, - "masked": masked and "True" or "False", - } - - # FACET VALUE - # - # - # ... - field_data = { - "facet" : [("default", "json", "tsv"), _("FACET"), ""], - "value" : [("default", "json", "tsv"), _("VALUE"), ""], - "src" : [("default", "json", "tsv"), _("SRC"), ""], - } - desired_field_order = (_("FACET"), _("VALUE"), _("SRC")) - def_fmt = "{0:64} {1:5} {2}" - - if list_masked: - # if we're displaying masked facets, we should also mark which - # facets are masked in the output. - field_data["masked"] = \ - [("default", "json", "tsv"), _("MASKED"), ""] - desired_field_order += (_("MASKED"),) - def_fmt = "{0:57} {1:5} {2:6} {3}" - - # print without trailing newline. - sys.stdout.write(misc.get_listing(desired_field_order, - field_data, gen_listing(), output_format, def_fmt, - omit_headers)) - - if not found[0] and req_facets: - if output_format == "default": - # Don't pollute other output formats. - error(_("no matching facets found"), - cmd=subcommand) - return EXIT_OOPS +def list_facet( + op, + api_inst, + pargs, + omit_headers, + output_format, + list_all_items, + list_masked, + list_installed, +): + """pkg facet [-Hai] [-F format] [ ...]""" + + subcommand = "facet" + if output_format is None: + output_format = "default" + + # To work around Python 2.x's scoping limits, a list is used. 
+ found = [False] + req_facets = set(pargs) + + facet_list = api_inst.FACET_IMAGE + if list_all_items: + facet_list = api_inst.FACET_ALL + elif list_installed: + facet_list = api_inst.FACET_INSTALLED + + # If user explicitly provides facets, display implicit value even if + # not explicitly set in the image or found in a package. + implicit = req_facets and True or False + + def gen_listing(): + for name, val, src, masked in api_inst.gen_facets( + facet_list, implicit=implicit, patterns=req_facets + ): + if output_format == "default": + name_list = name.split(".")[1:] + name = ".".join(name_list) + found[0] = True + + if not list_masked and masked: + continue + + # "value" and "masked" are intentionally not _(). + yield { + "facet": name, + "value": val and "True" or "False", + "src": src, + "masked": masked and "True" or "False", + } + + # FACET VALUE + # + # + # ... + field_data = { + "facet": [("default", "json", "tsv"), _("FACET"), ""], + "value": [("default", "json", "tsv"), _("VALUE"), ""], + "src": [("default", "json", "tsv"), _("SRC"), ""], + } + desired_field_order = (_("FACET"), _("VALUE"), _("SRC")) + def_fmt = "{0:64} {1:5} {2}" + + if list_masked: + # if we're displaying masked facets, we should also mark which + # facets are masked in the output. + field_data["masked"] = [("default", "json", "tsv"), _("MASKED"), ""] + desired_field_order += (_("MASKED"),) + def_fmt = "{0:57} {1:5} {2:6} {3}" + + # print without trailing newline. + sys.stdout.write( + misc.get_listing( + desired_field_order, + field_data, + gen_listing(), + output_format, + def_fmt, + omit_headers, + ) + ) - # Successful if no facets exist or if at least one matched. - return EXIT_OK + if not found[0] and req_facets: + if output_format == "default": + # Don't pollute other output formats. + error(_("no matching facets found"), cmd=subcommand) + return EXIT_OOPS -def list_linked(op, api_inst, pargs, li_ignore, omit_headers): - """pkg list-linked [-H] + # Successful if no facets exist or if at least one matched. + return EXIT_OK - List all the linked images known to the current image.""" - api_inst.progresstracker.set_purpose( - api_inst.progresstracker.PURPOSE_LISTING) +def list_linked(op, api_inst, pargs, li_ignore, omit_headers): + """pkg list-linked [-H] - li_list = api_inst.list_linked(li_ignore) - if len(li_list) == 0: - return EXIT_OK + List all the linked images known to the current image.""" - fmt = "" - li_header = [_("NAME"), _("RELATIONSHIP"), _("PATH")] - for col in range(0, len(li_header)): - width = max([len(row[col]) for row in li_list]) - width = max(width, len(li_header[col])) - if (fmt != ''): - fmt += "\t" - fmt += "{{{0}!s:{1}}}".format(col, width) + api_inst.progresstracker.set_purpose( + api_inst.progresstracker.PURPOSE_LISTING + ) - if not omit_headers: - msg(fmt.format(*li_header)) - for row in li_list: - msg(fmt.format(*row)) + li_list = api_inst.list_linked(li_ignore) + if len(li_list) == 0: return EXIT_OK -def pubcheck_linked(op, api_inst, pargs): - """If we're a child image, verify that the parent image - publisher configuration is a subset of our publisher configuration. 
- If we have any children, recurse into them and perform a publisher - check.""" + fmt = "" + li_header = [_("NAME"), _("RELATIONSHIP"), _("PATH")] + for col in range(0, len(li_header)): + width = max([len(row[col]) for row in li_list]) + width = max(width, len(li_header[col])) + if fmt != "": + fmt += "\t" + fmt += "{{{0}!s:{1}}}".format(col, width) - try: - api_inst.linked_publisher_check() - except api_errors.ImageLockedError as e: - error(e) - return EXIT_LOCKED - except api_errors.ImageMissingKeyFile as e: - error(e) - return EXIT_EACCESS + if not omit_headers: + msg(fmt.format(*li_header)) + for row in li_list: + msg(fmt.format(*row)) + return EXIT_OK - return EXIT_OK -def hotfix_cleanup(op, api_inst, pargs): - try: - api_inst.hotfix_origin_cleanup() - except api_errors.ImageLockedError as e: - error(e) - return EXIT_LOCKED - except api_errors.ImageMissingKeyFile as e: - error(e) - return EXIT_EACCESS - except api_errors.UnprivilegedUserError as e: - error(e) - return EXIT_OOPS +def pubcheck_linked(op, api_inst, pargs): + """If we're a child image, verify that the parent image + publisher configuration is a subset of our publisher configuration. + If we have any children, recurse into them and perform a publisher + check.""" - return EXIT_OK + try: + api_inst.linked_publisher_check() + except api_errors.ImageLockedError as e: + error(e) + return EXIT_LOCKED + except api_errors.ImageMissingKeyFile as e: + error(e) + return EXIT_EACCESS -def __parse_linked_props(args, op): - """"Parse linked image property options that were specified on the - command line into a dictionary. Make sure duplicate properties were - not specified.""" + return EXIT_OK - linked_props = dict() - for pv in args: - try: - p, v = pv.split("=", 1) - except ValueError: - usage(_("linked image property arguments must be of " - "the form '='."), cmd=op) - if p not in li.prop_values: - usage(_("invalid linked image property: " - "'{0}'.").format(p), cmd=op) +def hotfix_cleanup(op, api_inst, pargs): + try: + api_inst.hotfix_origin_cleanup() + except api_errors.ImageLockedError as e: + error(e) + return EXIT_LOCKED + except api_errors.ImageMissingKeyFile as e: + error(e) + return EXIT_EACCESS + except api_errors.UnprivilegedUserError as e: + error(e) + return EXIT_OOPS - if p in linked_props: - usage(_("linked image property specified multiple " - "times: '{0}'.").format(p), cmd=op) + return EXIT_OK - linked_props[p] = v - return linked_props +def __parse_linked_props(args, op): + """ "Parse linked image property options that were specified on the + command line into a dictionary. Make sure duplicate properties were + not specified.""" -def list_property_linked(op, api_inst, pargs, - li_name, omit_headers): - """pkg property-linked [-H] [-l ] [propname ...] + linked_props = dict() + for pv in args: + try: + p, v = pv.split("=", 1) + except ValueError: + usage( + _( + "linked image property arguments must be of " + "the form '='." + ), + cmd=op, + ) - List the linked image properties associated with a child or parent - image.""" + if p not in li.prop_values: + usage( + _("invalid linked image property: " "'{0}'.").format(p), cmd=op + ) - api_inst.progresstracker.set_purpose( - api_inst.progresstracker.PURPOSE_LISTING) + if p in linked_props: + usage( + _( + "linked image property specified multiple " "times: '{0}'." 
+ ).format(p), + cmd=op, + ) - lin = None - if li_name: - lin = api_inst.parse_linked_name(li_name) - props = api_inst.get_linked_props(lin=lin) + linked_props[p] = v - for p in pargs: - if p not in props.keys(): - error(_("{op}: no such property: {p}").format( - op=op, p=p)) - return EXIT_OOPS + return linked_props - if len(props) == 0: - return EXIT_OK - if not pargs: - pargs = props.keys() +def list_property_linked(op, api_inst, pargs, li_name, omit_headers): + """pkg property-linked [-H] [-l ] [propname ...] - width = max(max([len(p) for p in pargs if props[p]]), 8) - fmt = "{{0:{0}}}\t{{1}}".format(width) - if not omit_headers: - msg(fmt.format("PROPERTY", "VALUE")) - for p in sorted(pargs): - if not props[p]: - continue - msg(fmt.format(p, props[p])) + List the linked image properties associated with a child or parent + image.""" - return EXIT_OK + api_inst.progresstracker.set_purpose( + api_inst.progresstracker.PURPOSE_LISTING + ) -def set_property_linked(op, api_inst, pargs, - accept, backup_be, backup_be_name, be_activate, be_name, li_ignore, - li_md_only, li_name, li_parent_sync, li_pkg_updates, new_be, noexecute, - origins, parsable_version, quiet, refresh_catalogs, reject_pats, - show_licenses, update_index, verbose): - """pkg set-property-linked - [-nvq] [--accept] [--licenses] [--no-index] [--no-refresh] - [--no-parent-sync] [--no-pkg-updates] - [--linked-md-only] = ... - - Change the specified linked image properties. This may result in - updating the package contents of a child image.""" - - api_inst.progresstracker.set_purpose( - api_inst.progresstracker.PURPOSE_LISTING) - - # make sure we're a child image - if li_name: - lin = api_inst.parse_linked_name(li_name) - else: - lin = api_inst.get_linked_name() + lin = None + if li_name: + lin = api_inst.parse_linked_name(li_name) + props = api_inst.get_linked_props(lin=lin) - xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op) - if not xrval: - return EXIT_OOPS + for p in pargs: + if p not in props.keys(): + error(_("{op}: no such property: {p}").format(op=op, p=p)) + return EXIT_OOPS + if len(props) == 0: return EXIT_OK -def audit_linked(op, api_inst, pargs, + if not pargs: + pargs = props.keys() + + width = max(max([len(p) for p in pargs if props[p]]), 8) + fmt = "{{0:{0}}}\t{{1}}".format(width) + if not omit_headers: + msg(fmt.format("PROPERTY", "VALUE")) + for p in sorted(pargs): + if not props[p]: + continue + msg(fmt.format(p, props[p])) + + return EXIT_OK + + +def set_property_linked( + op, + api_inst, + pargs, + accept, + backup_be, + backup_be_name, + be_activate, + be_name, + li_ignore, + li_md_only, + li_name, + li_parent_sync, + li_pkg_updates, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + update_index, + verbose, +): + """pkg set-property-linked + [-nvq] [--accept] [--licenses] [--no-index] [--no-refresh] + [--no-parent-sync] [--no-pkg-updates] + [--linked-md-only] = ... + + Change the specified linked image properties. 
This may result in + updating the package contents of a child image.""" + + api_inst.progresstracker.set_purpose( + api_inst.progresstracker.PURPOSE_LISTING + ) + + # make sure we're a child image + if li_name: + lin = api_inst.parse_linked_name(li_name) + else: + lin = api_inst.get_linked_name() + + xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op) + if not xrval: + return EXIT_OOPS + + return EXIT_OK + + +def audit_linked( + op, + api_inst, + pargs, li_parent_sync, li_target_all, li_target_list, omit_headers, - quiet): - """pkg audit-linked [-a|-l ] + quiet, +): + """pkg audit-linked [-a|-l ] + + Audit one or more child images to see if they are in sync + with their parent image.""" + + api_inst.progresstracker.set_purpose( + api_inst.progresstracker.PURPOSE_LISTING + ) + + # audit the requested child image(s) + if not li_target_all and not li_target_list: + # audit the current image + rvdict = api_inst.audit_linked(li_parent_sync=li_parent_sync) + else: + # audit the requested child image(s) + rvdict = api_inst.audit_linked_children(li_target_list) + if not rvdict: + # may not have had any children + return EXIT_OK + + # display audit return values + width = max(max([len(k) for k in rvdict.keys()]), 8) + fmt = "{{0!s:{0}}}\t{{1}}".format(width) + if not omit_headers: + msg(fmt.format("NAME", "STATUS")) + + if not quiet: + for k, (rv, err, p_dict) in rvdict.items(): + if rv == EXIT_OK: + msg(fmt.format(k, _("synced"))) + elif rv == EXIT_DIVERGED: + msg(fmt.format(k, _("diverged"))) + + rv, err, p_dicts = api_inst.audit_linked_rvdict2rv(rvdict) + if err: + error(err, cmd=op) + return rv + + +def sync_linked( + op, + api_inst, + pargs, + accept, + backup_be, + backup_be_name, + be_activate, + be_name, + li_ignore, + li_md_only, + li_parent_sync, + li_pkg_updates, + li_target_all, + li_target_list, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + stage, + update_index, + verbose, +): + """pkg sync-linked [-a|-l ] + [-nvq] [--accept] [--licenses] [--no-index] [--no-refresh] + [--no-parent-sync] [--no-pkg-updates] + [--linked-md-only] [-a|-l ] + + Sync one or more child images with their parent image.""" + + xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op) + if not xrval: + return EXIT_OOPS + + if not li_target_all and not li_target_list: + # sync the current image + return __api_op( + op, + api_inst, + _accept=accept, + _li_ignore=li_ignore, + _noexecute=noexecute, + _origins=origins, + _parsable_version=parsable_version, + _quiet=quiet, + _show_licenses=show_licenses, + _stage=stage, + _verbose=verbose, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_activate=be_activate, + be_name=be_name, + li_md_only=li_md_only, + li_parent_sync=li_parent_sync, + li_pkg_updates=li_pkg_updates, + new_be=new_be, + refresh_catalogs=refresh_catalogs, + reject_list=reject_pats, + update_index=update_index, + ) - Audit one or more child images to see if they are in sync - with their parent image.""" + # sync the requested child image(s) + api_inst.progresstracker.set_major_phase( + api_inst.progresstracker.PHASE_UTILITY + ) + rvdict = api_inst.sync_linked_children( + li_target_list, + noexecute=noexecute, + accept=accept, + show_licenses=show_licenses, + refresh_catalogs=refresh_catalogs, + update_index=update_index, + li_pkg_updates=li_pkg_updates, + li_md_only=li_md_only, + ) + + rv, err, p_dicts = api_inst.sync_linked_rvdict2rv(rvdict) + if err: + error(err, cmd=op) + if parsable_version is not None and rv == 
EXIT_OK: + try: + __display_parsable_plan(api_inst, parsable_version, p_dicts) + except api_errors.ApiException as e: + error(e, cmd=op) + return EXIT_OOPS + return rv + + +def attach_linked( + op, + api_inst, + pargs, + accept, + allow_relink, + attach_child, + attach_parent, + be_activate, + backup_be, + backup_be_name, + be_name, + force, + li_ignore, + li_md_only, + li_parent_sync, + li_pkg_updates, + li_props, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + update_index, + verbose, +): + """pkg attach-linked + [-fnvq] [--accept] [--licenses] [--no-index] [--no-refresh] + [--no-pkg-updates] [--linked-md-only] + [--allow-relink] + [--parsable-version=] + [--prop-linked = ...] + (-c|-p) + + Attach a child linked image. The child could be this image attaching + itself to a parent, or another image being attach as a child with + this image being the parent.""" + + for k, v in li_props: + if k in [li.PROP_PATH, li.PROP_NAME, li.PROP_MODEL]: + usage( + _("cannot specify linked image property: " "'{0}'").format(k), + cmd=op, + ) + + if len(pargs) < 2: + usage(_("a linked image name and path must be specified"), cmd=op) + + li_name = pargs[0] + li_path = pargs[1] + + # parse the specified name + lin = api_inst.parse_linked_name(li_name, allow_unknown=True) + + xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op) + if not xrval: + return EXIT_OOPS + + if attach_parent: + # attach the current image to a parent + return __api_op( + op, + api_inst, + _accept=accept, + _li_ignore=li_ignore, + _noexecute=noexecute, + _origins=origins, + _parsable_version=parsable_version, + _quiet=quiet, + _show_licenses=show_licenses, + _verbose=verbose, + allow_relink=allow_relink, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_activate=be_activate, + be_name=be_name, + force=force, + li_md_only=li_md_only, + li_path=li_path, + li_pkg_updates=li_pkg_updates, + li_props=li_props, + lin=lin, + new_be=new_be, + refresh_catalogs=refresh_catalogs, + reject_list=reject_pats, + update_index=update_index, + ) - api_inst.progresstracker.set_purpose( - api_inst.progresstracker.PURPOSE_LISTING) + # attach the requested child image + api_inst.progresstracker.set_major_phase( + api_inst.progresstracker.PHASE_UTILITY + ) + (rv, err, p_dict) = api_inst.attach_linked_child( + lin, + li_path, + li_props, + accept=accept, + allow_relink=allow_relink, + force=force, + li_md_only=li_md_only, + li_pkg_updates=li_pkg_updates, + noexecute=noexecute, + refresh_catalogs=refresh_catalogs, + reject_list=reject_pats, + show_licenses=show_licenses, + update_index=update_index, + ) + + if err: + error(err, cmd=op) + if parsable_version is not None and rv == EXIT_OK: + assert p_dict is not None + try: + __display_parsable_plan(api_inst, parsable_version, [p_dict]) + except api_errors.ApiException as e: + error(e, cmd=op) + return EXIT_OOPS + return rv + + +def detach_linked( + op, + api_inst, + pargs, + force, + li_md_only, + li_pkg_updates, + li_target_all, + li_target_list, + noexecute, + quiet, + verbose, +): + """pkg detach-linked + [-fnvq] [-a|-l ] [--linked-md-only] + + Detach one or more child linked images.""" + + if not li_target_all and not li_target_list: + # detach the current image + return __api_op( + op, + api_inst, + _noexecute=noexecute, + _quiet=quiet, + _verbose=verbose, + force=force, + li_md_only=li_md_only, + li_pkg_updates=li_pkg_updates, + ) - # audit the requested child image(s) - if not li_target_all and not li_target_list: - # 
audit the current image - rvdict = api_inst.audit_linked(li_parent_sync=li_parent_sync) - else: - # audit the requested child image(s) - rvdict = api_inst.audit_linked_children(li_target_list) - if not rvdict: - # may not have had any children - return EXIT_OK - - # display audit return values - width = max(max([len(k) for k in rvdict.keys()]), 8) - fmt = "{{0!s:{0}}}\t{{1}}".format(width) - if not omit_headers: - msg(fmt.format("NAME", "STATUS")) + api_inst.progresstracker.set_major_phase( + api_inst.progresstracker.PHASE_UTILITY + ) + rvdict = api_inst.detach_linked_children( + li_target_list, + force=force, + li_md_only=li_md_only, + li_pkg_updates=li_pkg_updates, + noexecute=noexecute, + ) - if not quiet: - for k, (rv, err, p_dict) in rvdict.items(): - if rv == EXIT_OK: - msg(fmt.format(k, _("synced"))) - elif rv == EXIT_DIVERGED: - msg(fmt.format(k, _("diverged"))) + rv, err, p_dicts = api_inst.detach_linked_rvdict2rv(rvdict) + if err: + error(err, cmd=op) + return rv - rv, err, p_dicts = api_inst.audit_linked_rvdict2rv(rvdict) - if err: - error(err, cmd=op) - return rv -def sync_linked(op, api_inst, pargs, accept, backup_be, backup_be_name, - be_activate, be_name, li_ignore, li_md_only, li_parent_sync, - li_pkg_updates, li_target_all, li_target_list, new_be, noexecute, origins, - parsable_version, quiet, refresh_catalogs, reject_pats, show_licenses, - stage, update_index, verbose): - """pkg sync-linked [-a|-l ] - [-nvq] [--accept] [--licenses] [--no-index] [--no-refresh] - [--no-parent-sync] [--no-pkg-updates] - [--linked-md-only] [-a|-l ] +def image_create(args): + """Create an image of the requested kind, at the given path. Load + catalog for initial publisher for convenience. + + At present, it is legitimate for a user image to specify that it will be + deployed in a zone. 
An easy example would be a program with an optional + component that consumes global zone-only information, such as various + kernel statistics or device information.""" + + cmd_name = "image-create" + + force = False + imgtype = IMG_TYPE_USER + is_zone = False + add_mirrors = set() + add_origins = set() + pub_name = None + pub_url = None + refresh_allowed = True + ssl_key = None + ssl_cert = None + variants = {} + facets = pkg.facet.Facets() + set_props = {} + version = None + + opts, pargs = getopt.getopt( + args, + "fFPUzg:m:p:k:c:", + [ + "force", + "full", + "partial", + "user", + "zone", + "facet=", + "mirror=", + "origin=", + "publisher=", + "no-refresh", + "variant=", + "set-property=", + ], + ) + + for opt, arg in opts: + if opt in ("-p", "--publisher"): + if pub_url: + usage( + _("The -p option can be specified only " "once."), + cmd=cmd_name, + ) + try: + pub_name, pub_url = arg.split("=", 1) + except ValueError: + pub_name = None + pub_url = arg + if pub_url: + pub_url = misc.parse_uri(pub_url, cwd=orig_cwd) + elif opt == "-c": + ssl_cert = arg + elif opt == "-f" or opt == "--force": + force = True + elif opt in ("-g", "--origin"): + add_origins.add(misc.parse_uri(arg, cwd=orig_cwd)) + elif opt == "-k": + ssl_key = arg + elif opt in ("-m", "--mirror"): + add_mirrors.add(misc.parse_uri(arg, cwd=orig_cwd)) + elif opt == "-z" or opt == "--zone": + is_zone = True + imgtype = IMG_TYPE_ENTIRE + elif opt == "-F" or opt == "--full": + imgtype = IMG_TYPE_ENTIRE + elif opt == "-P" or opt == "--partial": + imgtype = IMG_TYPE_PARTIAL + elif opt == "-U" or opt == "--user": + imgtype = IMG_TYPE_USER + elif opt == "--facet": + allow = {"TRUE": True, "FALSE": False} + try: + f_name, f_value = arg.split("=", 1) + except ValueError: + f_name = arg + f_value = "" + if not f_name.startswith("facet."): + f_name = "facet.{0}".format(f_name) + if not f_name or f_value.upper() not in allow: + usage( + _( + "Facet arguments must be of the " + "form '=(True|False)'" + ), + cmd=cmd_name, + ) + facets[f_name] = allow[f_value.upper()] + elif opt == "--no-refresh": + refresh_allowed = False + elif opt == "--set-property": + t = arg.split("=", 1) + if len(t) < 2: + usage( + _( + "properties to be set must be of the " + "form '='. This is what was " + "given: {0}" + ).format(arg), + cmd=cmd_name, + ) + if t[0] in set_props: + usage( + _( + "a property may only be set once in a " + "command. {0} was set twice" + ).format(t[0]), + cmd=cmd_name, + ) + set_props[t[0]] = t[1] + elif opt == "--variant": + try: + v_name, v_value = arg.split("=", 1) + if not v_name.startswith("variant."): + v_name = "variant.{0}".format(v_name) + except ValueError: + usage( + _( + "variant arguments must be of the " + "form '='." + ), + cmd=cmd_name, + ) + variants[v_name] = v_value + + if not pargs: + usage(_("an image directory path must be specified"), cmd=cmd_name) + elif len(pargs) > 1: + usage(_("only one image directory path may be specified"), cmd=cmd_name) + image_dir = pargs[0] + + if pub_url and not pub_name and not refresh_allowed: + usage( + _( + "--no-refresh cannot be used with -p unless a " + "publisher prefix is provided." 
+ ), + cmd=cmd_name, + ) - Sync one or more child images with their parent image.""" + if not pub_url and (add_origins or add_mirrors): + usage( + _("A publisher must be specified if -g or -m are used."), + cmd=cmd_name, + ) - xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op) - if not xrval: - return EXIT_OOPS + if not refresh_allowed and pub_url: + # Auto-config can't be done if refresh isn't allowed, so treat + # this as a manual configuration case. + add_origins.add(pub_url) + repo_uri = None + else: + repo_uri = pub_url + + # Get sanitized SSL Cert/Key input values. + ssl_cert, ssl_key = _get_ssl_cert_key(image_dir, is_zone, ssl_cert, ssl_key) + + progtrack = get_tracker() + progtrack.set_major_phase(progtrack.PHASE_UTILITY) + global _api_inst + global img + try: + _api_inst = api.image_create( + PKG_CLIENT_NAME, + CLIENT_API_VERSION, + image_dir, + imgtype, + is_zone, + facets=facets, + force=force, + mirrors=list(add_mirrors), + origins=list(add_origins), + prefix=pub_name, + progtrack=progtrack, + refresh_allowed=refresh_allowed, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + repo_uri=repo_uri, + variants=variants, + props=set_props, + ) + img = _api_inst.img + except api_errors.InvalidDepotResponseException as e: + # Ensure messages are displayed after the spinner. + logger.error("\n") + error( + _( + "The URI '{pub_url}' does not appear to point to a " + "valid pkg repository.\nPlease check the repository's " + "location and the client's network configuration." + "\nAdditional details:\n\n{error}" + ).format(pub_url=pub_url, error=e), + cmd=cmd_name, + ) + print_proxy_config() + return EXIT_OOPS + except api_errors.CatalogRefreshException as cre: + # Ensure messages are displayed after the spinner. + error("", cmd=cmd_name) + if display_catalog_failures(cre) == 0: + return EXIT_OOPS + else: + return EXIT_PARTIAL + except api_errors.ApiException as e: + error(str(e), cmd=cmd_name) + return EXIT_OOPS + finally: + # Normally this would get flushed by handle_errors + # but that won't happen if the above code throws, because + # _api_inst will be None. 
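# Illustrative sketch only -- not code from the patched client.py.  It
# restates the "name=value" splitting convention image_create() uses above
# for --facet, --variant and --set-property; the helper name normalize_pair
# and its prefix argument are assumptions made for illustration.
def normalize_pair(arg, prefix=None):
    # Split on the first '=' only, so values may themselves contain '='.
    try:
        name, value = arg.split("=", 1)
    except ValueError:
        name, value = arg, ""
    # image_create() forces a namespace prefix for facet./variant. names;
    # do the same here when one is supplied.
    if prefix and not name.startswith(prefix):
        name = "{0}{1}".format(prefix, name)
    return name, value

# Example: normalize_pair("doc.man=False", prefix="facet.")
# returns ("facet.doc.man", "False").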
+ progtrack.flush() - if not li_target_all and not li_target_list: - # sync the current image - return __api_op(op, api_inst, _accept=accept, - _li_ignore=li_ignore, _noexecute=noexecute, - _origins=origins, _parsable_version=parsable_version, - _quiet=quiet, _show_licenses=show_licenses, _stage=stage, - _verbose=verbose, backup_be=backup_be, - backup_be_name=backup_be_name, be_activate=be_activate, - be_name=be_name, li_md_only=li_md_only, - li_parent_sync=li_parent_sync, - li_pkg_updates=li_pkg_updates, new_be=new_be, - refresh_catalogs=refresh_catalogs, - reject_list=reject_pats, - update_index=update_index) - - # sync the requested child image(s) - api_inst.progresstracker.set_major_phase( - api_inst.progresstracker.PHASE_UTILITY) - rvdict = api_inst.sync_linked_children(li_target_list, - noexecute=noexecute, accept=accept, show_licenses=show_licenses, - refresh_catalogs=refresh_catalogs, update_index=update_index, - li_pkg_updates=li_pkg_updates, li_md_only=li_md_only) - - rv, err, p_dicts = api_inst.sync_linked_rvdict2rv(rvdict) - if err: - error(err, cmd=op) - if parsable_version is not None and rv == EXIT_OK: - try: - __display_parsable_plan(api_inst, parsable_version, - p_dicts) - except api_errors.ApiException as e: - error(e, cmd=op) - return EXIT_OOPS - return rv + return EXIT_OK -def attach_linked(op, api_inst, pargs, - accept, allow_relink, attach_child, attach_parent, be_activate, - backup_be, backup_be_name, be_name, force, li_ignore, li_md_only, - li_parent_sync, li_pkg_updates, li_props, new_be, noexecute, origins, - parsable_version, quiet, refresh_catalogs, reject_pats, show_licenses, - update_index, verbose): - """pkg attach-linked - [-fnvq] [--accept] [--licenses] [--no-index] [--no-refresh] - [--no-pkg-updates] [--linked-md-only] - [--allow-relink] - [--parsable-version=] - [--prop-linked = ...] - (-c|-p) - - Attach a child linked image. 
The child could be this image attaching - itself to a parent, or another image being attach as a child with - this image being the parent.""" - - for k, v in li_props: - if k in [li.PROP_PATH, li.PROP_NAME, li.PROP_MODEL]: - usage(_("cannot specify linked image property: " - "'{0}'").format(k), cmd=op) - - if len(pargs) < 2: - usage(_("a linked image name and path must be specified"), - cmd=op) - - li_name = pargs[0] - li_path = pargs[1] - - # parse the specified name - lin = api_inst.parse_linked_name(li_name, allow_unknown=True) - - xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op) - if not xrval: - return EXIT_OOPS - if attach_parent: - # attach the current image to a parent - return __api_op(op, api_inst, _accept=accept, - _li_ignore=li_ignore, _noexecute=noexecute, - _origins=origins, _parsable_version=parsable_version, - _quiet=quiet, _show_licenses=show_licenses, - _verbose=verbose, allow_relink=allow_relink, - backup_be=backup_be, backup_be_name=backup_be_name, - be_activate=be_activate, be_name=be_name, force=force, - li_md_only=li_md_only, li_path=li_path, - li_pkg_updates=li_pkg_updates, li_props=li_props, - lin=lin, new_be=new_be, refresh_catalogs=refresh_catalogs, - reject_list=reject_pats, update_index=update_index) - - # attach the requested child image - api_inst.progresstracker.set_major_phase( - api_inst.progresstracker.PHASE_UTILITY) - (rv, err, p_dict) = api_inst.attach_linked_child(lin, li_path, li_props, - accept=accept, allow_relink=allow_relink, force=force, - li_md_only=li_md_only, li_pkg_updates=li_pkg_updates, - noexecute=noexecute, refresh_catalogs=refresh_catalogs, - reject_list=reject_pats, show_licenses=show_licenses, - update_index=update_index) +def rebuild_index(api_inst, pargs): + """pkg rebuild-index - if err: - error(err, cmd=op) - if parsable_version is not None and rv == EXIT_OK: - assert p_dict is not None - try: - __display_parsable_plan(api_inst, parsable_version, - [p_dict]) - except api_errors.ApiException as e: - error(e, cmd=op) - return EXIT_OOPS - return rv + Forcibly rebuild the search indexes. Will remove existing indexes + and build new ones from scratch.""" -def detach_linked(op, api_inst, pargs, force, li_md_only, li_pkg_updates, - li_target_all, li_target_list, noexecute, quiet, verbose): - """pkg detach-linked - [-fnvq] [-a|-l ] [--linked-md-only] + if pargs: + usage( + _("command does not take operands ('{0}')").format(" ".join(pargs)), + cmd="rebuild-index", + ) - Detach one or more child linked images.""" + try: + api_inst.rebuild_search_index() + except api_errors.ImageFormatUpdateNeeded as e: + format_update_error(e) + return EXIT_OOPS + except api_errors.CorruptedIndexException: + error( + "The search index appears corrupted. 
Please rebuild the " + "index with 'pkg rebuild-index'.", + cmd="rebuild-index", + ) + return EXIT_OOPS + except api_errors.ProblematicPermissionsIndexException as e: + error(str(e)) + error( + _( + "\n(Failure to consistently execute pkg commands as a " + "privileged user is often a source of this problem.)" + ) + ) + return EXIT_OOPS + else: + return EXIT_OK - if not li_target_all and not li_target_list: - # detach the current image - return __api_op(op, api_inst, _noexecute=noexecute, - _quiet=quiet, _verbose=verbose, force=force, - li_md_only=li_md_only, li_pkg_updates=li_pkg_updates) - api_inst.progresstracker.set_major_phase( - api_inst.progresstracker.PHASE_UTILITY) - rvdict = api_inst.detach_linked_children(li_target_list, force=force, - li_md_only=li_md_only, li_pkg_updates=li_pkg_updates, - noexecute=noexecute) +def history_list(api_inst, args): + """Display history about the current image.""" + + # define column name, header, field width and attribute name + # we compute 'reason', 'time' and 'release_note' columns ourselves + history_cols = { + "be": (_("BE"), "20", "operation_be"), + "be_uuid": (_("BE UUID"), "41", "operation_be_uuid"), + "client": (_("CLIENT"), "19", "client_name"), + "client_ver": (_("VERSION"), "15", "client_version"), + "command": (_("COMMAND"), "", "client_args"), + "finish": (_("FINISH"), "25", "operation_end_time"), + "id": (_("ID"), "10", "operation_userid"), + "new_be": (_("NEW BE"), "20", "operation_new_be"), + "new_be_uuid": (_("NEW BE UUID"), "41", "operation_new_be_uuid"), + "operation": (_("OPERATION"), "25", "operation_name"), + "outcome": (_("OUTCOME"), "12", "operation_result"), + "reason": (_("REASON"), "10", None), + "release_notes": (_("RELEASE NOTES"), "12", None), + "snapshot": (_("SNAPSHOT"), "20", "operation_snapshot"), + "start": (_("START"), "25", "operation_start_time"), + "time": (_("TIME"), "10", None), + "user": (_("USER"), "10", "operation_username"), + # omitting start state, end state, errors for now + # as these don't nicely fit into columns + } + + omit_headers = False + long_format = False + column_format = False + show_notes = False + display_limit = None # Infinite + time_vals = [] # list of timestamps for which we want history events + columns = ["start", "operation", "client", "outcome"] + + opts, pargs = getopt.getopt(args, "HNln:o:t:") + for opt, arg in opts: + if opt == "-H": + omit_headers = True + elif opt == "-N": + show_notes = True + elif opt == "-l": + long_format = True + elif opt == "-n": + try: + display_limit = int(arg) + except ValueError: + logger.error(_("Argument to -n must be numeric")) + return EXIT_BADOPT + + if display_limit <= 0: + logger.error(_("Argument to -n must be positive")) + return EXIT_BADOPT + elif opt == "-o": + column_format = True + columns = arg.split(",") + + # 'command' and 'reason' are multi-field columns, we + # insist they be the last item in the -o output, + # otherwise scripts could be broken by different numbers + # of output fields + if "command" in columns and "reason" in columns: + # Translators: 'command' and 'reason' are + # keywords and should not be translated + logger.error( + _( + "'command' and 'reason' columns " + "cannot be used together." 
+ ) + ) + return EXIT_BADOPT - rv, err, p_dicts = api_inst.detach_linked_rvdict2rv(rvdict) - if err: - error(err, cmd=op) - return rv + for col in ["command", "reason"]: + if col in columns and columns.index(col) != len(columns) - 1: + logger.error( + _( + "The '{0}' column must be the " + "last item in the -o list" + ).format(col) + ) + return EXIT_BADOPT -def image_create(args): - """Create an image of the requested kind, at the given path. Load - catalog for initial publisher for convenience. - - At present, it is legitimate for a user image to specify that it will be - deployed in a zone. An easy example would be a program with an optional - component that consumes global zone-only information, such as various - kernel statistics or device information.""" - - cmd_name = "image-create" - - force = False - imgtype = IMG_TYPE_USER - is_zone = False - add_mirrors = set() - add_origins = set() - pub_name = None - pub_url = None - refresh_allowed = True - ssl_key = None - ssl_cert = None - variants = {} - facets = pkg.facet.Facets() - set_props = {} - version = None - - opts, pargs = getopt.getopt(args, "fFPUzg:m:p:k:c:", - ["force", "full", "partial", "user", "zone", "facet=", "mirror=", - "origin=", "publisher=", "no-refresh", "variant=", - "set-property="]) - - for opt, arg in opts: - if opt in ("-p", "--publisher"): - if pub_url: - usage(_("The -p option can be specified only " - "once."), cmd=cmd_name) - try: - pub_name, pub_url = arg.split("=", 1) - except ValueError: - pub_name = None - pub_url = arg - if pub_url: - pub_url = misc.parse_uri(pub_url, cwd=orig_cwd) - elif opt == "-c": - ssl_cert = arg - elif opt == "-f" or opt == "--force": - force = True - elif opt in ("-g", "--origin"): - add_origins.add(misc.parse_uri(arg, cwd=orig_cwd)) - elif opt == "-k": - ssl_key = arg - elif opt in ("-m", "--mirror"): - add_mirrors.add(misc.parse_uri(arg, cwd=orig_cwd)) - elif opt == "-z" or opt == "--zone": - is_zone = True - imgtype = IMG_TYPE_ENTIRE - elif opt == "-F" or opt == "--full": - imgtype = IMG_TYPE_ENTIRE - elif opt == "-P" or opt == "--partial": - imgtype = IMG_TYPE_PARTIAL - elif opt == "-U" or opt == "--user": - imgtype = IMG_TYPE_USER - elif opt == "--facet": - allow = { "TRUE": True, "FALSE": False } - try: - f_name, f_value = arg.split("=", 1) - except ValueError: - f_name = arg - f_value = "" - if not f_name.startswith("facet."): - f_name = "facet.{0}".format(f_name) - if not f_name or f_value.upper() not in allow: - usage(_("Facet arguments must be of the " - "form '=(True|False)'"), - cmd=cmd_name) - facets[f_name] = allow[f_value.upper()] - elif opt == "--no-refresh": - refresh_allowed = False - elif opt == "--set-property": - t = arg.split("=", 1) - if len(t) < 2: - usage(_("properties to be set must be of the " - "form '='. This is what was " - "given: {0}").format(arg), cmd=cmd_name) - if t[0] in set_props: - usage(_("a property may only be set once in a " - "command. 
{0} was set twice").format(t[0]), - cmd=cmd_name) - set_props[t[0]] = t[1] - elif opt == "--variant": - try: - v_name, v_value = arg.split("=", 1) - if not v_name.startswith("variant."): - v_name = "variant.{0}".format(v_name) - except ValueError: - usage(_("variant arguments must be of the " - "form '='."), - cmd=cmd_name) - variants[v_name] = v_value - - if not pargs: - usage(_("an image directory path must be specified"), - cmd=cmd_name) - elif len(pargs) > 1: - usage(_("only one image directory path may be specified"), - cmd=cmd_name) - image_dir = pargs[0] - - if pub_url and not pub_name and not refresh_allowed: - usage(_("--no-refresh cannot be used with -p unless a " - "publisher prefix is provided."), cmd=cmd_name) - - if not pub_url and (add_origins or add_mirrors): - usage(_("A publisher must be specified if -g or -m are used."), - cmd=cmd_name) - - if not refresh_allowed and pub_url: - # Auto-config can't be done if refresh isn't allowed, so treat - # this as a manual configuration case. - add_origins.add(pub_url) - repo_uri = None - else: - repo_uri = pub_url + for col in columns: + if col not in history_cols: + logger.error( + _("Unknown output column " "'{0}'").format(col) + ) + return EXIT_BADOPT + if not __unique_columns(columns): + return EXIT_BADOPT - # Get sanitized SSL Cert/Key input values. - ssl_cert, ssl_key = _get_ssl_cert_key(image_dir, is_zone, ssl_cert, - ssl_key) + elif opt == "-t": + time_vals.extend(arg.split(",")) - progtrack = get_tracker() - progtrack.set_major_phase(progtrack.PHASE_UTILITY) - global _api_inst - global img - try: - _api_inst = api.image_create(PKG_CLIENT_NAME, CLIENT_API_VERSION, - image_dir, imgtype, is_zone, facets=facets, force=force, - mirrors=list(add_mirrors), origins=list(add_origins), - prefix=pub_name, progtrack=progtrack, - refresh_allowed=refresh_allowed, ssl_cert=ssl_cert, - ssl_key=ssl_key, repo_uri=repo_uri, variants=variants, - props=set_props) - img = _api_inst.img - except api_errors.InvalidDepotResponseException as e: - # Ensure messages are displayed after the spinner. - logger.error("\n") - error(_("The URI '{pub_url}' does not appear to point to a " - "valid pkg repository.\nPlease check the repository's " - "location and the client's network configuration." - "\nAdditional details:\n\n{error}").format( - pub_url=pub_url, error=e), - cmd=cmd_name) - print_proxy_config() - return EXIT_OOPS - except api_errors.CatalogRefreshException as cre: - # Ensure messages are displayed after the spinner. - error("", cmd=cmd_name) - if display_catalog_failures(cre) == 0: - return EXIT_OOPS - else: - return EXIT_PARTIAL - except api_errors.ApiException as e: - error(str(e), cmd=cmd_name) - return EXIT_OOPS - finally: - # Normally this would get flushed by handle_errors - # but that won't happen if the above code throws, because - # _api_inst will be None. - progtrack.flush() + if omit_headers and long_format: + usage(_("-H and -l may not be combined"), cmd="history") - return EXIT_OK + if column_format and long_format: + usage(_("-o and -l may not be combined"), cmd="history") -def rebuild_index(api_inst, pargs): - """pkg rebuild-index + if time_vals and display_limit: + usage(_("-n and -t may not be combined"), cmd="history") - Forcibly rebuild the search indexes. 
Will remove existing indexes - and build new ones from scratch.""" + if column_format and show_notes: + usage(_("-o and -N may not be combined"), cmd="history") - if pargs: - usage(_("command does not take operands ('{0}')").format( - " ".join(pargs)), cmd="rebuild-index") + if long_format and show_notes: + usage(_("-l and -N may not be combined"), cmd="history") - try: - api_inst.rebuild_search_index() - except api_errors.ImageFormatUpdateNeeded as e: - format_update_error(e) - return EXIT_OOPS - except api_errors.CorruptedIndexException: - error("The search index appears corrupted. Please rebuild the " - "index with 'pkg rebuild-index'.", cmd="rebuild-index") - return EXIT_OOPS - except api_errors.ProblematicPermissionsIndexException as e: - error(str(e)) - error(_("\n(Failure to consistently execute pkg commands as a " - "privileged user is often a source of this problem.)")) - return EXIT_OOPS - else: - return EXIT_OK + history_fmt = None -def history_list(api_inst, args): - """Display history about the current image. - """ - - # define column name, header, field width and attribute name - # we compute 'reason', 'time' and 'release_note' columns ourselves - history_cols = { - "be": (_("BE"), "20", "operation_be"), - "be_uuid": (_("BE UUID"), "41", "operation_be_uuid"), - "client": (_("CLIENT"), "19", "client_name"), - "client_ver": (_("VERSION"), "15", "client_version"), - "command": (_("COMMAND"), "", "client_args"), - "finish": (_("FINISH"), "25", "operation_end_time"), - "id": (_("ID"), "10", "operation_userid"), - "new_be": (_("NEW BE"), "20", "operation_new_be"), - "new_be_uuid": (_("NEW BE UUID"), "41", "operation_new_be_uuid"), - "operation": (_("OPERATION"), "25", "operation_name"), - "outcome": (_("OUTCOME"), "12", "operation_result"), - "reason": (_("REASON"), "10", None), - "release_notes": (_("RELEASE NOTES"), "12", None), - "snapshot": (_("SNAPSHOT"), "20", "operation_snapshot"), - "start": (_("START"), "25", "operation_start_time"), - "time": (_("TIME"), "10", None), - "user": (_("USER"), "10", "operation_username"), - # omitting start state, end state, errors for now - # as these don't nicely fit into columns - } + if not long_format and not show_notes: + headers = [] + # build our format string + for i, col in enumerate(columns): + # no need for trailing space for our last column + if columns.index(col) == len(columns) - 1: + fmt = "" + else: + fmt = history_cols[col][1] + if history_fmt: + history_fmt += "{{{0:d}!s:{1}}}".format(i, fmt) + else: + history_fmt = "{{0!s:{0}}}".format(fmt) + headers.append(history_cols[col][0]) + if not omit_headers: + msg(history_fmt.format(*headers)) - omit_headers = False - long_format = False - column_format = False - show_notes = False - display_limit = None # Infinite - time_vals = [] # list of timestamps for which we want history events - columns = ["start", "operation", "client", "outcome"] - - opts, pargs = getopt.getopt(args, "HNln:o:t:") - for opt, arg in opts: - if opt == "-H": - omit_headers = True - elif opt == "-N": - show_notes = True - elif opt == "-l": - long_format = True - elif opt == "-n": - try: - display_limit = int(arg) - except ValueError: - logger.error( - _("Argument to -n must be numeric")) - return EXIT_BADOPT - - if display_limit <= 0: - logger.error( - _("Argument to -n must be positive")) - return EXIT_BADOPT - elif opt == "-o": - column_format = True - columns = arg.split(",") - - # 'command' and 'reason' are multi-field columns, we - # insist they be the last item in the -o output, - # otherwise scripts 
could be broken by different numbers - # of output fields - if "command" in columns and "reason" in columns: - # Translators: 'command' and 'reason' are - # keywords and should not be translated - logger.error(_("'command' and 'reason' columns " - "cannot be used together.")) - return EXIT_BADOPT - - for col in ["command", "reason"]: - if col in columns and \ - columns.index(col) != len(columns) - 1: - logger.error( - _("The '{0}' column must be the " - "last item in the -o list").format( - col)) - return EXIT_BADOPT - - for col in columns: - if col not in history_cols: - logger.error( - _("Unknown output column " - "'{0}'").format(col)) - return EXIT_BADOPT - if not __unique_columns(columns): - return EXIT_BADOPT - - elif opt == "-t": - time_vals.extend(arg.split(",")) - - if omit_headers and long_format: - usage(_("-H and -l may not be combined"), cmd="history") - - if column_format and long_format: - usage(_("-o and -l may not be combined"), cmd="history") - - if time_vals and display_limit: - usage(_("-n and -t may not be combined"), cmd="history") - - if column_format and show_notes: - usage(_("-o and -N may not be combined"), cmd="history") - - if long_format and show_notes: - usage(_("-l and -N may not be combined"), cmd="history") - - history_fmt = None - - if not long_format and not show_notes: - headers = [] - # build our format string - for i, col in enumerate(columns): - # no need for trailing space for our last column - if columns.index(col) == len(columns) - 1: - fmt = "" - else: - fmt = history_cols[col][1] - if history_fmt: - history_fmt += "{{{0:d}!s:{1}}}".format(i, fmt) - else: - history_fmt = "{{0!s:{0}}}".format(fmt) - headers.append(history_cols[col][0]) - if not omit_headers: - msg(history_fmt.format(*headers)) + def gen_entries(): + """Error handler for history generation; avoids need to indent + and clobber formatting of logic below.""" + try: + for he in api_inst.gen_history( + limit=display_limit, times=time_vals + ): + yield he + except api_errors.HistoryException as e: + error(str(e), cmd="history") + sys.exit(EXIT_OOPS) + + if show_notes: + for he in gen_entries(): + start_time = misc.timestamp_to_time(he.operation_start_time) + start_time = datetime.datetime.fromtimestamp(start_time).isoformat() + if he.operation_release_notes: + msg(_("{0}: Release notes:").format(start_time)) + for a in he.notes: + msg(" {0}".format(a)) + else: + msg(_("{0}: Release notes: None").format(start_time)) - def gen_entries(): - """Error handler for history generation; avoids need to indent - and clobber formatting of logic below.""" - try: - for he in api_inst.gen_history(limit=display_limit, - times=time_vals): - yield he - except api_errors.HistoryException as e: - error(str(e), cmd="history") - sys.exit(EXIT_OOPS) - - if show_notes: - for he in gen_entries(): - start_time = misc.timestamp_to_time( - he.operation_start_time) - start_time = datetime.datetime.fromtimestamp( - start_time).isoformat() - if he.operation_release_notes: - msg(_("{0}: Release notes:").format(start_time)) - for a in he.notes: - msg(" {0}".format(a)) - else: - msg(_("{0}: Release notes: None").format( - start_time)) + return EXIT_OK - return EXIT_OK + for he in gen_entries(): + # populate a dictionary containing our output + output = {} + for col in history_cols: + if not history_cols[col][2]: + continue + output[col] = getattr(he, history_cols[col][2], None) + + # format some of the History object attributes ourselves + output["start"] = misc.timestamp_to_time(he.operation_start_time) + output["start"] 
= datetime.datetime.fromtimestamp( + output["start"] + ).isoformat() + output["finish"] = misc.timestamp_to_time(he.operation_end_time) + output["finish"] = datetime.datetime.fromtimestamp( + output["finish"] + ).isoformat() + + dt_start = misc.timestamp_to_datetime(he.operation_start_time) + dt_end = misc.timestamp_to_datetime(he.operation_end_time) + if dt_start > dt_end: + output["finish"] = _("{0} (clock drift detected)").format( + output["finish"] + ) + + output["time"] = dt_end - dt_start + # We can't use timedelta's str() method, since when + # output["time"].days > 0, it prints eg. "4 days, 3:12:54" + # breaking our field separation, so we need to do this by hand. + total_time = output["time"] + secs = total_time.seconds + add_hrs = total_time.days * 24 + mins, secs = divmod(secs, 60) + hrs, mins = divmod(mins, 60) + output["time"] = "{0}:{1:02d}:{2:02d}".format(add_hrs + hrs, mins, secs) + + output["command"] = " ".join(he.client_args) + + # Where we weren't able to lookup the current name, add a '*' to + # the entry, indicating the boot environment is no longer + # present. + if he.operation_be and he.operation_current_be: + output["be"] = he.operation_current_be + elif he.operation_be_uuid: + output["be"] = "{0}*".format(he.operation_be) + else: + output["be"] = he.operation_be - for he in gen_entries(): - # populate a dictionary containing our output - output = {} - for col in history_cols: - if not history_cols[col][2]: - continue - output[col] = getattr(he, history_cols[col][2], None) - - # format some of the History object attributes ourselves - output["start"] = misc.timestamp_to_time( - he.operation_start_time) - output["start"] = datetime.datetime.fromtimestamp( - output["start"]).isoformat() - output["finish"] = misc.timestamp_to_time( - he.operation_end_time) - output["finish"] = datetime.datetime.fromtimestamp( - output["finish"]).isoformat() - - dt_start = misc.timestamp_to_datetime(he.operation_start_time) - dt_end = misc.timestamp_to_datetime(he.operation_end_time) - if dt_start > dt_end: - output["finish"] = \ - _("{0} (clock drift detected)").format( - output["finish"]) - - output["time"] = dt_end - dt_start - # We can't use timedelta's str() method, since when - # output["time"].days > 0, it prints eg. "4 days, 3:12:54" - # breaking our field separation, so we need to do this by hand. - total_time = output["time"] - secs = total_time.seconds - add_hrs = total_time.days * 24 - mins, secs = divmod(secs, 60) - hrs, mins = divmod(mins, 60) - output["time"] = "{0}:{1:02d}:{2:02d}".format( - add_hrs + hrs, mins, secs) - - output["command"] = " ".join(he.client_args) - - # Where we weren't able to lookup the current name, add a '*' to - # the entry, indicating the boot environment is no longer - # present. 
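# Stand-alone sketch of the duration formatting above (illustrative, not
# part of this change): str() on a timedelta prints e.g. "4 days, 3:12:54"
# once .days > 0, so whole days are folded into the hour count by hand to
# keep the TIME column a single "H:MM:SS" field.
import datetime

def format_duration(delta):
    secs = delta.seconds
    add_hrs = delta.days * 24
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return "{0}:{1:02d}:{2:02d}".format(add_hrs + hrs, mins, secs)

# format_duration(datetime.timedelta(days=4, hours=3, minutes=12, seconds=54))
# returns "99:12:54" instead of "4 days, 3:12:54".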
- if he.operation_be and he.operation_current_be: - output["be"] = he.operation_current_be - elif he.operation_be_uuid: - output["be"] = "{0}*".format(he.operation_be) - else: - output["be"] = he.operation_be + if he.operation_new_be and he.operation_current_new_be: + output["new_be"] = he.operation_current_new_be + elif he.operation_new_be_uuid: + output["new_be"] = "{0}*".format(he.operation_new_be) + else: + output["new_be"] = "{0}".format(he.operation_new_be) - if he.operation_new_be and he.operation_current_new_be: - output["new_be"] = he.operation_current_new_be - elif he.operation_new_be_uuid: - output["new_be"] = "{0}*".format(he.operation_new_be) - else: - output["new_be"] = "{0}".format(he.operation_new_be) + if he.operation_release_notes: + output["release_notes"] = _("Yes") + else: + output["release_notes"] = _("No") - if he.operation_release_notes: - output["release_notes"] = _("Yes") - else: - output["release_notes"] = _("No") + outcome, reason = he.operation_result_text + output["outcome"] = outcome + output["reason"] = reason + output["snapshot"] = he.operation_snapshot - outcome, reason = he.operation_result_text - output["outcome"] = outcome - output["reason"] = reason - output["snapshot"] = he.operation_snapshot + # be, snapshot and new_be use values in parenthesis + # since these cannot appear in valid BE or snapshot names + if not output["be"]: + output["be"] = _("(Unknown)") - # be, snapshot and new_be use values in parenthesis - # since these cannot appear in valid BE or snapshot names - if not output["be"]: - output["be"] = _("(Unknown)") + if not output["be_uuid"]: + output["be_uuid"] = _("(Unknown)") - if not output["be_uuid"]: - output["be_uuid"] = _("(Unknown)") + if not output["snapshot"]: + output["snapshot"] = _("(None)") - if not output["snapshot"]: - output["snapshot"] = _("(None)") + if not output["new_be"]: + output["new_be"] = _("(None)") - if not output["new_be"]: - output["new_be"] = _("(None)") + if not output["new_be_uuid"]: + output["new_be_uuid"] = _("(None)") - if not output["new_be_uuid"]: - output["new_be_uuid"] = _("(None)") + enc = locale.getlocale(locale.LC_CTYPE)[1] + if not enc: + enc = locale.getpreferredencoding() - enc = locale.getlocale(locale.LC_CTYPE)[1] - if not enc: - enc = locale.getpreferredencoding() + if long_format: + data = __get_long_history_data(he, output) + for field, value in data: + field = misc.force_str(field, encoding=enc) + value = misc.force_str(value, encoding=enc) + msg("{0!s:>18}: {1!s}".format(field, value)) - if long_format: - data = __get_long_history_data(he, output) - for field, value in data: - field = misc.force_str(field, encoding=enc) - value = misc.force_str(value, encoding=enc) - msg("{0!s:>18}: {1!s}".format(field, value)) + # Separate log entries with a blank line. + msg("") + else: + items = [] + for col in columns: + item = output[col] + item = misc.force_str(item, encoding=enc) + items.append(item) + msg(history_fmt.format(*items)) + return EXIT_OK - # Separate log entries with a blank line. 
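# Illustrative sketch (not part of this change) of how history_fmt is
# assembled above: each selected column contributes a "{N!s:width}" piece,
# with the widths taken from the history_cols table shown earlier and the
# last column left unpadded, exactly as the loop above does.
cols = [("START", "25"), ("OPERATION", "25"), ("CLIENT", "19"), ("OUTCOME", "")]
history_fmt = ""
for i, (_name, width) in enumerate(cols):
    history_fmt += "{{{0:d}!s:{1}}}".format(i, width)
# history_fmt is now "{0!s:25}{1!s:25}{2!s:19}{3!s:}"
print(history_fmt.format("2024-01-01T10:20:30", "install", "pkg", "Succeeded"))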
- msg("") - else: - items = [] - for col in columns: - item = output[col] - item = misc.force_str(item, encoding=enc) - items.append(item) - msg(history_fmt.format(*items)) - return EXIT_OK def __unique_columns(columns): - """Return true if each entry in the provided list of columns only - appears once.""" - - seen_cols = set() - dup_cols = set() - for col in columns: - if col in seen_cols: - dup_cols.add(col) - seen_cols.add(col) - for col in dup_cols: - logger.error(_("Duplicate column specified: {0}").format(col)) - return not dup_cols + """Return true if each entry in the provided list of columns only + appears once.""" + + seen_cols = set() + dup_cols = set() + for col in columns: + if col in seen_cols: + dup_cols.add(col) + seen_cols.add(col) + for col in dup_cols: + logger.error(_("Duplicate column specified: {0}").format(col)) + return not dup_cols + def __get_long_history_data(he, hist_info): - """Return an array of tuples containing long_format history info""" - data = [] - data.append((_("Operation"), hist_info["operation"])) - - data.append((_("Outcome"), hist_info["outcome"])) - data.append((_("Reason"), hist_info["reason"])) - data.append((_("Client"), hist_info["client"])) - data.append((_("Version"), hist_info["client_ver"])) - - data.append((_("User"), "{0} ({1})".format(hist_info["user"], - hist_info["id"]))) - - if hist_info["be"]: - data.append((_("Boot Env."), hist_info["be"])) - if hist_info["be_uuid"]: - data.append((_("Boot Env. UUID"), hist_info["be_uuid"])) - if hist_info["new_be"]: - data.append((_("New Boot Env."), hist_info["new_be"])) - if hist_info["new_be_uuid"]: - data.append((_("New Boot Env. UUID"), - hist_info["new_be_uuid"])) - if hist_info["snapshot"]: - data.append((_("Snapshot"), hist_info["snapshot"])) - - data.append((_("Start Time"), hist_info["start"])) - data.append((_("End Time"), hist_info["finish"])) - data.append((_("Total Time"), hist_info["time"])) - data.append((_("Command"), hist_info["command"])) - data.append((_("Release Notes"), hist_info["release_notes"])) - - state = he.operation_start_state - if state: - data.append((_("Start State"), "\n" + state)) - - state = he.operation_end_state - if state: - data.append((_("End State"), "\n" + state)) - - errors = "\n".join(he.operation_errors) - if errors: - data.append((_("Errors"), "\n" + errors)) - return data + """Return an array of tuples containing long_format history info""" + data = [] + data.append((_("Operation"), hist_info["operation"])) + + data.append((_("Outcome"), hist_info["outcome"])) + data.append((_("Reason"), hist_info["reason"])) + data.append((_("Client"), hist_info["client"])) + data.append((_("Version"), hist_info["client_ver"])) + + data.append( + (_("User"), "{0} ({1})".format(hist_info["user"], hist_info["id"])) + ) + + if hist_info["be"]: + data.append((_("Boot Env."), hist_info["be"])) + if hist_info["be_uuid"]: + data.append((_("Boot Env. UUID"), hist_info["be_uuid"])) + if hist_info["new_be"]: + data.append((_("New Boot Env."), hist_info["new_be"])) + if hist_info["new_be_uuid"]: + data.append((_("New Boot Env. 
UUID"), hist_info["new_be_uuid"])) + if hist_info["snapshot"]: + data.append((_("Snapshot"), hist_info["snapshot"])) + + data.append((_("Start Time"), hist_info["start"])) + data.append((_("End Time"), hist_info["finish"])) + data.append((_("Total Time"), hist_info["time"])) + data.append((_("Command"), hist_info["command"])) + data.append((_("Release Notes"), hist_info["release_notes"])) + + state = he.operation_start_state + if state: + data.append((_("Start State"), "\n" + state)) + + state = he.operation_end_state + if state: + data.append((_("End State"), "\n" + state)) + + errors = "\n".join(he.operation_errors) + if errors: + data.append((_("Errors"), "\n" + errors)) + return data + def history_purge(api_inst, pargs): - """Purge image history""" - api_inst.purge_history() - msg(_("History purged.")) + """Purge image history""" + api_inst.purge_history() + msg(_("History purged.")) + def print_proxy_config(): - """If the user has configured http_proxy or https_proxy in the - environment, print out the values. Some transport errors are - not debuggable without this information handy.""" + """If the user has configured http_proxy or https_proxy in the + environment, print out the values. Some transport errors are + not debuggable without this information handy.""" - http_proxy = os.environ.get("http_proxy", None) - https_proxy = os.environ.get("https_proxy", None) + http_proxy = os.environ.get("http_proxy", None) + https_proxy = os.environ.get("https_proxy", None) - if not http_proxy and not https_proxy: - return + if not http_proxy and not https_proxy: + return + + logger.error( + _("\nThe following proxy configuration is set in the" " environment:\n") + ) + if http_proxy: + logger.error(_("http_proxy: {0}\n").format(http_proxy)) + if https_proxy: + logger.error(_("https_proxy: {0}\n").format(https_proxy)) - logger.error(_("\nThe following proxy configuration is set in the" - " environment:\n")) - if http_proxy: - logger.error(_("http_proxy: {0}\n").format(http_proxy)) - if https_proxy: - logger.error(_("https_proxy: {0}\n").format(https_proxy)) def update_format(api_inst, pargs): - """Update image to newest format.""" + """Update image to newest format.""" - try: - res = api_inst.update_format() - except api_errors.ApiException as e: - error(str(e), cmd="update-format") - return EXIT_OOPS + try: + res = api_inst.update_format() + except api_errors.ApiException as e: + error(str(e), cmd="update-format") + return EXIT_OOPS + + if res: + logger.info(_("Image format updated.")) + return EXIT_OK - if res: - logger.info(_("Image format updated.")) - return EXIT_OK + logger.info(_("Image format already current.")) + return EXIT_NOP - logger.info(_("Image format already current.")) - return EXIT_NOP def print_version(pargs): - if pargs: - usage(_("version: command does not take operands " - "('{0}')").format(" ".join(pargs)), cmd="version") - msg(pkg.VERSION) - return EXIT_OK + if pargs: + usage( + _("version: command does not take operands " "('{0}')").format( + " ".join(pargs) + ), + cmd="version", + ) + msg(pkg.VERSION) + return EXIT_OK + # To allow exception handler access to the image. _api_inst = None @@ -5757,7 +7189,8 @@ def print_version(pargs): # # {option_name: (short, long)} # -# + +# fmt: off opts_mapping = { "backup_be_name" : ("", "backup-be-name"), @@ -5994,19 +7427,25 @@ def print_version(pargs): "output_format": ["default", "tsv", "json", "json-formatted"] } +# fmt: on + # These tables are an addendum to the pkg_op_opts/opts_* lists in # modules/client/options.py. 
They contain all the options for functions which # are not represented in options.py but go through common option processing. # This list should get shortened and eventually removed by moving more/all # functions out of client.py. + def opts_cb_remote(op, api_inst, opts, opts_new): - options.opts_cb_fd("ctlfd", api_inst, opts, opts_new) - options.opts_cb_fd("progfd", api_inst, opts, opts_new) + options.opts_cb_fd("ctlfd", api_inst, opts, opts_new) + options.opts_cb_fd("progfd", api_inst, opts, opts_new) + + # move progfd from opts_new into a global + global_settings.client_output_progfd = opts_new["progfd"] + del opts_new["progfd"] - # move progfd from opts_new into a global - global_settings.client_output_progfd = opts_new["progfd"] - del opts_new["progfd"] + +# fmt: off opts_remote = [ opts_cb_remote, @@ -6014,11 +7453,18 @@ def opts_cb_remote(op, api_inst, opts, opts_new): ("progfd", None), ] +# fmt: on + + def opts_cb_varcet(op, api_inst, opts, opts_new): - if opts_new["list_all_items"] and opts_new["list_installed"]: - raise api_errors.InvalidOptionError( - api_errors.InvalidOptionError.INCOMPAT, - ["list_all_items", "list_installed"]) + if opts_new["list_all_items"] and opts_new["list_installed"]: + raise api_errors.InvalidOptionError( + api_errors.InvalidOptionError.INCOMPAT, + ["list_all_items", "list_installed"], + ) + + +# fmt: off opts_list_varcet = \ options.opts_table_no_headers + \ @@ -6065,470 +7511,526 @@ def opts_cb_varcet(op, api_inst, opts, opts_new): "variant" : opts_list_variant, } +# fmt: on + def main_func(): - global_settings.client_name = PKG_CLIENT_NAME + global_settings.client_name = PKG_CLIENT_NAME - global _api_inst - global img - global orig_cwd - global pargs + global _api_inst + global img + global orig_cwd + global pargs + try: + orig_cwd = os.getcwd() + except OSError as e: try: - orig_cwd = os.getcwd() - except OSError as e: + orig_cwd = os.environ["PWD"] + if not orig_cwd or orig_cwd[0] != "/": + orig_cwd = None + except KeyError: + orig_cwd = None + + try: + opts, pargs = getopt.getopt( + sys.argv[1:], + "R:D:?", + ["debug=", "help", "runid=", "notes", "no-network-cache"], + ) + except getopt.GetoptError as e: + usage(_("illegal global option -- {0}").format(e.opt)) + + runid = None + show_usage = False + for opt, arg in opts: + if opt == "-D" or opt == "--debug": + if arg in ["plan", "transport", "exclude", "actions"]: + key = arg + value = "True" + else: try: - orig_cwd = os.environ["PWD"] - if not orig_cwd or orig_cwd[0] != "/": - orig_cwd = None - except KeyError: - orig_cwd = None + key, value = arg.split("=", 1) + except (AttributeError, ValueError): + usage( + _( + "{opt} takes argument of form " + "name=value, not {arg}" + ).format(opt=opt, arg=arg) + ) + DebugValues.set_value(key, value) + elif opt == "-R": + mydir = arg + elif opt == "--runid": + runid = arg + elif opt in ("--help", "-?"): + show_usage = True + elif opt == "--notes": + notes_block() + return EXIT_OK + elif opt == "--no-network-cache": + global_settings.client_no_network_cache = True + + # The globals in pkg.digest can be influenced by debug flags + if DebugValues: + reload(pkg.digest) + + subcommand = None + if pargs: + subcommand = pargs.pop(0) + if subcommand in aliases: + subcommand = aliases[subcommand] + if subcommand == "help": + if pargs: + sub = pargs.pop(0) + if sub in cmds and sub not in ["help", "-?", "--help"]: + usage(retcode=0, full=False, cmd=sub) + elif sub == "-v": + # Only display the long usage message + # in the verbose mode. 
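# Side note on the "# fmt: off" / "# fmt: on" markers added in this change
# (illustrative example, not from client.py): Black leaves everything
# between the two markers untouched, which is how hand-aligned tables such
# as opts_mapping keep their column layout after the migration.
# fmt: off
EXIT_CODES = {
    "ok":     0,
    "oops":   1,
    "badopt": 2,
}
# fmt: on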
+ usage(retcode=0, full=True, verbose=True) + elif sub not in ["help", "-?", "--help"]: + usage( + _("unknown subcommand " "'{0}'").format(sub), + unknown_cmd=sub, + ) + else: + usage(retcode=0, full=True) + else: + usage(retcode=0, full=True) + # A gauntlet of tests to see if we need to print usage information + if subcommand in cmds and show_usage: + usage(retcode=0, cmd=subcommand, full=False) + if subcommand and subcommand not in cmds: + usage( + _("unknown subcommand '{0}'").format(subcommand), + unknown_cmd=subcommand, + ) + if show_usage: + usage(retcode=0, full=True) + if not subcommand: + usage(_("no subcommand specified"), full=True) + if runid is not None: + try: + runid = int(runid) + except: + usage(_("runid must be an integer")) + global_settings.client_runid = runid + + for opt in ["--help", "-?"]: + if opt in pargs: + usage(retcode=0, full=False, cmd=subcommand) + + # This call only affects sockets created by Python. The transport + # framework uses the defaults in global_settings, which may be + # overridden in the environment. The default socket module should + # only be used in rare cases by ancillary code, making it safe to + # code the value here, at least for now. + socket.setdefaulttimeout(30) # in secs + + cmds_no_image = { + "version": print_version, + "image-create": image_create, + } + func = cmds_no_image.get(subcommand, None) + if func: + if "mydir" in locals(): + usage( + _("-R not allowed for {0} subcommand").format(subcommand), + cmd=subcommand, + ) try: - opts, pargs = getopt.getopt(sys.argv[1:], "R:D:?", - ["debug=", "help", "runid=", "notes", "no-network-cache"]) + pkg_timer.record("client startup", logger=logger) + ret = func(pargs) except getopt.GetoptError as e: - usage(_("illegal global option -- {0}").format(e.opt)) - - runid = None - show_usage = False - for opt, arg in opts: - if opt == "-D" or opt == "--debug": - if arg in ["plan", "transport", "exclude", "actions"]: - key = arg - value = "True" - else: - try: - key, value = arg.split("=", 1) - except (AttributeError, ValueError): - usage(_("{opt} takes argument of form " - "name=value, not {arg}").format( - opt=opt, arg=arg)) - DebugValues.set_value(key, value) - elif opt == "-R": - mydir = arg - elif opt == "--runid": - runid = arg - elif opt in ("--help", "-?"): - show_usage = True - elif opt == "--notes": - notes_block() - return EXIT_OK - elif opt == "--no-network-cache": - global_settings.client_no_network_cache = True - - # The globals in pkg.digest can be influenced by debug flags - if DebugValues: - reload(pkg.digest) - - subcommand = None - if pargs: - subcommand = pargs.pop(0) - if subcommand in aliases: - subcommand = aliases[subcommand] - if subcommand == "help": - if pargs: - sub = pargs.pop(0) - if sub in cmds and \ - sub not in ["help", "-?", "--help"]: - usage(retcode=0, full=False, cmd=sub) - elif sub == "-v": - # Only display the long usage message - # in the verbose mode. 
- usage(retcode=0, full=True, - verbose=True) - elif sub not in ["help", "-?", "--help"]: - usage(_("unknown subcommand " - "'{0}'").format(sub), unknown_cmd=sub) - else: - usage(retcode=0, full=True) - else: - usage(retcode=0, full=True) - - # A gauntlet of tests to see if we need to print usage information - if subcommand in cmds and show_usage: - usage(retcode=0, cmd=subcommand, full=False) - if subcommand and subcommand not in cmds: - usage(_("unknown subcommand '{0}'").format(subcommand), - unknown_cmd=subcommand) - if show_usage: - usage(retcode=0, full=True) - if not subcommand: - usage(_("no subcommand specified"), full=True) - if runid is not None: - try: - runid = int(runid) - except: - usage(_("runid must be an integer")) - global_settings.client_runid = runid - - for opt in ["--help", "-?"]: - if opt in pargs: - usage(retcode=0, full=False, cmd=subcommand) - - # This call only affects sockets created by Python. The transport - # framework uses the defaults in global_settings, which may be - # overridden in the environment. The default socket module should - # only be used in rare cases by ancillary code, making it safe to - # code the value here, at least for now. - socket.setdefaulttimeout(30) # in secs - - cmds_no_image = { - "version" : print_version, - "image-create" : image_create, - } - func = cmds_no_image.get(subcommand, None) - if func: - if "mydir" in locals(): - usage(_("-R not allowed for {0} subcommand").format( - subcommand), cmd=subcommand) - try: - pkg_timer.record("client startup", logger=logger) - ret = func(pargs) - except getopt.GetoptError as e: - usage(_("illegal option -- {0}").format(e.opt), - cmd=subcommand) - return ret - - provided_image_dir = True - pkg_image_used = False - if "mydir" not in locals(): - mydir, provided_image_dir = api.get_default_image_root( - orig_cwd=orig_cwd) - if os.environ.get("PKG_IMAGE"): - # It's assumed that this has been checked by the above - # function call and hasn't been removed from the - # environment. - pkg_image_used = True - - if not mydir: - error(_("Could not find image. Use the -R option or set " - "$PKG_IMAGE to the\nlocation of an image.")) - return EXIT_OOPS + usage(_("illegal option -- {0}").format(e.opt), cmd=subcommand) + return ret + + provided_image_dir = True + pkg_image_used = False + if "mydir" not in locals(): + mydir, provided_image_dir = api.get_default_image_root( + orig_cwd=orig_cwd + ) + if os.environ.get("PKG_IMAGE"): + # It's assumed that this has been checked by the above + # function call and hasn't been removed from the + # environment. + pkg_image_used = True + + if not mydir: + error( + _( + "Could not find image. Use the -R option or set " + "$PKG_IMAGE to the\nlocation of an image." + ) + ) + return EXIT_OOPS + + # Get ImageInterface and image object. + api_inst = __api_alloc(mydir, provided_image_dir, pkg_image_used) + if api_inst is None: + return EXIT_OOPS + _api_inst = api_inst + img = api_inst.img + + # Find subcommand and execute operation. + func = cmds[subcommand][0] + pargs_limit = None + if len(cmds[subcommand]) > 1: + pargs_limit = cmds[subcommand][1] + + pkg_timer.record("client startup", logger=logger) + + # Get the available options for the requested operation to create the + # getopt parsing strings. 
+ valid_opts = options.get_pkg_opts(subcommand, add_table=cmd_opts) + if valid_opts is None: + # if there are no options for an op, it has its own processing + try: + return func(api_inst, pargs) + except getopt.GetoptError as e: + usage(_("illegal option -- {0}").format(e.opt), cmd=subcommand) - # Get ImageInterface and image object. - api_inst = __api_alloc(mydir, provided_image_dir, pkg_image_used) - if api_inst is None: - return EXIT_OOPS - _api_inst = api_inst - img = api_inst.img - - # Find subcommand and execute operation. - func = cmds[subcommand][0] - pargs_limit = None - if len(cmds[subcommand]) > 1: - pargs_limit = cmds[subcommand][1] - - pkg_timer.record("client startup", logger=logger) - - # Get the available options for the requested operation to create the - # getopt parsing strings. - valid_opts = options.get_pkg_opts(subcommand, add_table=cmd_opts) - if valid_opts is None: - # if there are no options for an op, it has its own processing - try: - return func(api_inst, pargs) - except getopt.GetoptError as e: - usage(_("illegal option -- {0}").format(e.opt), - cmd=subcommand) + try: + # Parse CLI arguments into dictionary containing corresponding + # options and values. + opt_dict, pargs = misc.opts_parse( + subcommand, pargs, valid_opts, opts_mapping, usage + ) + + if pargs_limit is not None and len(pargs) > pargs_limit: + usage( + _("illegal argument -- {0}").format(pargs[pargs_limit]), + cmd=subcommand, + ) + + opts = options.opts_assemble( + subcommand, api_inst, opt_dict, add_table=cmd_opts, cwd=orig_cwd + ) + + except api_errors.InvalidOptionError as e: + # We can't use the string representation of the exception since + # it references internal option names. We substitute the CLI + # options and create a new exception to make sure the messages + # are correct. + + # Convert the internal options to CLI options. We make sure that + # when there is a short and a long version for the same option + # we print both to avoid confusion. + def get_cli_opt(option): + try: + s, l = opts_mapping[option] + if l and not s: + return "--{0}".format(l) + elif s and not l: + return "-{0}".format(s) + else: + return "-{0}/--{1}".format(s, l) + except KeyError: + # ignore if we can't find a match + # (happens for repeated arguments or invalid + # arguments) + return option + except TypeError: + # ignore if we can't find a match + # (happens for an invalid arguments list) + return option + + cli_opts = [] + opt_def = [] + + for o in e.options: + cli_opts.append(get_cli_opt(o)) + + # collect the default value (see comment below) + opt_def.append( + options.get_pkg_opts_defaults(subcommand, o, add_table=cmd_opts) + ) + + # Prepare for headache: + # If we have an option 'b' which is set to True by default it + # will be toggled to False if the users specifies the according + # option on the CLI. + # If we now have an option 'a' which requires option 'b' to be + # set, we can't say "'a' requires 'b'" because the user can only + # specify 'not b'. So the correct message would be: + # "'a' is incompatible with 'not b'". + # We can get there by just changing the type of the exception + # for all cases where the default value of one of the options is + # True. + if e.err_type == api_errors.InvalidOptionError.REQUIRED: + if len(opt_def) == 2 and (opt_def[0] or opt_def[1]): + e.err_type = api_errors.InvalidOptionError.INCOMPAT + + # This new exception will have the CLI options, so can be passed + # directly to usage(). 
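# Illustrative use of get_cli_opt() above (not part of this change): given
# a (short, long) pair from opts_mapping it returns the user-facing option
# form.  The sample table here is abbreviated and partly hypothetical.
sample_mapping = {
    "backup_be_name": ("", "backup-be-name"),  # long form only
    "noexecute": ("n", ""),                    # short form only (assumed entry)
    "verbose": ("v", "verbose"),               # both forms (assumed entry)
}
# With opts_mapping bound to sample_mapping, get_cli_opt would yield:
#   "backup_be_name" -> "--backup-be-name"
#   "noexecute"      -> "-n"
#   "verbose"        -> "-v/--verbose"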
+ new_e = api_errors.InvalidOptionError( + err_type=e.err_type, + options=cli_opts, + msg=e.msg, + valid_args=e.valid_args, + ) + + usage(str(new_e), cmd=subcommand) + + # Reset the progress tracker here, because we may have + # to switch to a different tracker due to the options parse. + _api_inst.progresstracker = get_tracker() + + return func(op=subcommand, api_inst=api_inst, pargs=pargs, **opts) - try: - # Parse CLI arguments into dictionary containing corresponding - # options and values. - opt_dict, pargs = misc.opts_parse(subcommand, pargs, valid_opts, - opts_mapping, usage) - - if pargs_limit is not None and len(pargs) > pargs_limit: - usage(_("illegal argument -- {0}").format( - pargs[pargs_limit]), cmd=subcommand) - - opts = options.opts_assemble(subcommand, api_inst, opt_dict, - add_table=cmd_opts, cwd=orig_cwd) - - except api_errors.InvalidOptionError as e: - - # We can't use the string representation of the exception since - # it references internal option names. We substitute the CLI - # options and create a new exception to make sure the messages - # are correct. - - # Convert the internal options to CLI options. We make sure that - # when there is a short and a long version for the same option - # we print both to avoid confusion. - def get_cli_opt(option): - try: - s, l = opts_mapping[option] - if l and not s: - return "--{0}".format(l) - elif s and not l: - return "-{0}".format(s) - else: - return "-{0}/--{1}".format(s, l) - except KeyError: - # ignore if we can't find a match - # (happens for repeated arguments or invalid - # arguments) - return option - except TypeError: - # ignore if we can't find a match - # (happens for an invalid arguments list) - return option - cli_opts = [] - opt_def = [] - - for o in e.options: - cli_opts.append(get_cli_opt(o)) - - # collect the default value (see comment below) - opt_def.append(options.get_pkg_opts_defaults(subcommand, - o, add_table=cmd_opts)) - - # Prepare for headache: - # If we have an option 'b' which is set to True by default it - # will be toggled to False if the users specifies the according - # option on the CLI. - # If we now have an option 'a' which requires option 'b' to be - # set, we can't say "'a' requires 'b'" because the user can only - # specify 'not b'. So the correct message would be: - # "'a' is incompatible with 'not b'". - # We can get there by just changing the type of the exception - # for all cases where the default value of one of the options is - # True. - if e.err_type == api_errors.InvalidOptionError.REQUIRED: - if len(opt_def) == 2 and (opt_def[0] or opt_def[1]): - e.err_type = \ - api_errors.InvalidOptionError.INCOMPAT - - # This new exception will have the CLI options, so can be passed - # directly to usage(). - new_e = api_errors.InvalidOptionError(err_type=e.err_type, - options=cli_opts, msg=e.msg, valid_args=e.valid_args) - - usage(str(new_e), cmd=subcommand) - - # Reset the progress tracker here, because we may have - # to switch to a different tracker due to the options parse. - _api_inst.progresstracker = get_tracker() - - return func(op=subcommand, api_inst=api_inst, - pargs=pargs, **opts) # # Establish a specific exit status which means: "python barfed an exception" # so that we can more easily detect these in testing of the CLI commands. 
# def handle_errors(func, non_wrap_print=True, *args, **kwargs): - traceback_str = misc.get_traceback_message() + traceback_str = misc.get_traceback_message() + try: + # Out of memory errors can be raised as EnvironmentErrors with + # an errno of ENOMEM, so in order to handle those exceptions + # with other errnos, we nest this try block and have the outer + # one handle the other instances. try: - # Out of memory errors can be raised as EnvironmentErrors with - # an errno of ENOMEM, so in order to handle those exceptions - # with other errnos, we nest this try block and have the outer - # one handle the other instances. - try: - __ret = func(*args, **kwargs) - except (MemoryError, EnvironmentError) as __e: - if isinstance(__e, EnvironmentError) and \ - __e.errno != errno.ENOMEM: - raise - if _api_inst: - _api_inst.abort( - result=RESULT_FAILED_OUTOFMEMORY) - error("\n" + misc.out_of_memory()) - __ret = EXIT_OOPS - except SystemExit as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_UNKNOWN) - raise __e - except (PipeError, KeyboardInterrupt): - if _api_inst: - _api_inst.abort(result=RESULT_CANCELED) - # We don't want to display any messages here to prevent - # possible further broken pipe (EPIPE) errors. - __ret = EXIT_OOPS - except api_errors.ImageInsufficentSpace as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_DISKSPACE) - error(__e) - __ret = EXIT_OOPS - except api_errors.LinkedImageException as __e: - error(_("Linked image exception(s):\n{0}").format( - str(__e))) - __ret = __e.lix_exitrv - except api_errors.PlanCreationException as __e: - error(__e) - __ret = EXIT_OOPS - except api_errors.CertificateError as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_CONFIGURATION) - error(__e) - __ret = EXIT_OOPS - except api_errors.PublisherError as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_BAD_REQUEST) - error(__e) - __ret = EXIT_OOPS - except api_errors.ImageLockedError as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_LOCKED) - error(__e) - __ret = EXIT_LOCKED - except api_errors.ImageMissingKeyFile as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_LOCKED) - error(__e) - __ret = EXIT_EACCESS - except api_errors.TransportError as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_TRANSPORT) - logger.error(_("\nErrors were encountered while attempting " - "to retrieve package or file data for\nthe requested " - "operation.")) - logger.error(_("Details follow:\n\n{0}").format(__e)) - print_proxy_config() - __ret = EXIT_OOPS - except api_errors.InvalidCatalogFile as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_STORAGE) - logger.error(_(""" + __ret = func(*args, **kwargs) + except (MemoryError, EnvironmentError) as __e: + if isinstance(__e, EnvironmentError) and __e.errno != errno.ENOMEM: + raise + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_OUTOFMEMORY) + error("\n" + misc.out_of_memory()) + __ret = EXIT_OOPS + except SystemExit as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_UNKNOWN) + raise __e + except (PipeError, KeyboardInterrupt): + if _api_inst: + _api_inst.abort(result=RESULT_CANCELED) + # We don't want to display any messages here to prevent + # possible further broken pipe (EPIPE) errors. 
+ __ret = EXIT_OOPS + except api_errors.ImageInsufficentSpace as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_DISKSPACE) + error(__e) + __ret = EXIT_OOPS + except api_errors.LinkedImageException as __e: + error(_("Linked image exception(s):\n{0}").format(str(__e))) + __ret = __e.lix_exitrv + except api_errors.PlanCreationException as __e: + error(__e) + __ret = EXIT_OOPS + except api_errors.CertificateError as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_CONFIGURATION) + error(__e) + __ret = EXIT_OOPS + except api_errors.PublisherError as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_BAD_REQUEST) + error(__e) + __ret = EXIT_OOPS + except api_errors.ImageLockedError as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_LOCKED) + error(__e) + __ret = EXIT_LOCKED + except api_errors.ImageMissingKeyFile as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_LOCKED) + error(__e) + __ret = EXIT_EACCESS + except api_errors.TransportError as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_TRANSPORT) + logger.error( + _( + "\nErrors were encountered while attempting " + "to retrieve package or file data for\nthe requested " + "operation." + ) + ) + logger.error(_("Details follow:\n\n{0}").format(__e)) + print_proxy_config() + __ret = EXIT_OOPS + except api_errors.InvalidCatalogFile as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_STORAGE) + logger.error( + _( + """ An error was encountered while attempting to read image state information -to perform the requested operation. Details follow:\n\n{0}""").format(__e)) - __ret = EXIT_OOPS - except api_errors.InvalidDepotResponseException as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_TRANSPORT) - logger.error(_("\nUnable to contact a valid package " - "repository. This may be due to a problem with the " - "repository, network misconfiguration, or an incorrect " - "pkg client configuration. Please verify the client's " - "network configuration and repository's location.")) - logger.error(_("\nAdditional details:\n\n{0}").format(__e)) - print_proxy_config() - __ret = EXIT_OOPS - except api_errors.HistoryLoadException as __e: - # Since a history related error occurred, discard all - # information about the current operation(s) in progress. - if _api_inst: - _api_inst.clear_history() - error(_("An error was encountered while attempting to load " - "history information\nabout past client operations.")) - error(__e) - __ret = EXIT_OOPS - except api_errors.HistoryStoreException as __e: - # Since a history related error occurred, discard all - # information about the current operation(s) in progress. - if _api_inst: - _api_inst.clear_history() - error(_("An error was encountered while attempting to store " - "information about the\ncurrent operation in client " - "history.")) - error(__e) - __ret = EXIT_OOPS - except api_errors.HistoryPurgeException as __e: - # Since a history related error occurred, discard all - # information about the current operation(s) in progress. - if _api_inst: - _api_inst.clear_history() - error(_("An error was encountered while attempting to purge " - "client history.")) - error(__e) - __ret = EXIT_OOPS - except api_errors.VersionException as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_UNKNOWN) - error(_("The pkg command appears out of sync with the libraries" - " provided\nby pkg:/package/pkg. 
The client version is " - "{client} while the library\nAPI version is {api}.").format( - client=__e.received_version, - api=__e.expected_version - )) - __ret = EXIT_OOPS - except api_errors.WrapSuccessfulIndexingException as __e: - __ret = EXIT_OK - except api_errors.WrapIndexingException as __e: - def _wrapper(): - raise __e.wrapped - __ret = handle_errors(_wrapper, non_wrap_print=False) - s = "" - if __ret == 99: - s += _("\n{err}{stacktrace}").format( - err=__e, stacktrace=traceback_str) - - s += _("\n\nDespite the error while indexing, the operation " - "has completed successfuly.") - error(s) - except api_errors.ReadOnlyFileSystemException as __e: - __ret = EXIT_OOPS - except api_errors.UnexpectedLinkError as __e: - error("\n" + str(__e)) - __ret = EXIT_OOPS - except api_errors.UnrecognizedCatalogPart as __e: - error("\n" + str(__e)) - __ret = EXIT_OOPS - except api_errors.InvalidConfigFile as __e: - error("\n" + str(__e)) - __ret = EXIT_OOPS - except (api_errors.PkgUnicodeDecodeError, UnicodeEncodeError) as __e: - error("\n" + str(__e)) - error("the locale environment (LC_ALL, LC_CTYPE) must be" - " a UTF-8 locale or C.") - __ret = EXIT_OOPS - except: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_UNKNOWN) - if non_wrap_print: - traceback.print_exc() - error(traceback_str) - __ret = 99 - return __ret +to perform the requested operation. Details follow:\n\n{0}""" + ).format(__e) + ) + __ret = EXIT_OOPS + except api_errors.InvalidDepotResponseException as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_TRANSPORT) + logger.error( + _( + "\nUnable to contact a valid package " + "repository. This may be due to a problem with the " + "repository, network misconfiguration, or an incorrect " + "pkg client configuration. Please verify the client's " + "network configuration and repository's location." + ) + ) + logger.error(_("\nAdditional details:\n\n{0}").format(__e)) + print_proxy_config() + __ret = EXIT_OOPS + except api_errors.HistoryLoadException as __e: + # Since a history related error occurred, discard all + # information about the current operation(s) in progress. + if _api_inst: + _api_inst.clear_history() + error( + _( + "An error was encountered while attempting to load " + "history information\nabout past client operations." + ) + ) + error(__e) + __ret = EXIT_OOPS + except api_errors.HistoryStoreException as __e: + # Since a history related error occurred, discard all + # information about the current operation(s) in progress. + if _api_inst: + _api_inst.clear_history() + error( + _( + "An error was encountered while attempting to store " + "information about the\ncurrent operation in client " + "history." + ) + ) + error(__e) + __ret = EXIT_OOPS + except api_errors.HistoryPurgeException as __e: + # Since a history related error occurred, discard all + # information about the current operation(s) in progress. + if _api_inst: + _api_inst.clear_history() + error( + _( + "An error was encountered while attempting to purge " + "client history." + ) + ) + error(__e) + __ret = EXIT_OOPS + except api_errors.VersionException as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_UNKNOWN) + error( + _( + "The pkg command appears out of sync with the libraries" + " provided\nby pkg:/package/pkg. The client version is " + "{client} while the library\nAPI version is {api}." 
+ ).format(client=__e.received_version, api=__e.expected_version) + ) + __ret = EXIT_OOPS + except api_errors.WrapSuccessfulIndexingException as __e: + __ret = EXIT_OK + except api_errors.WrapIndexingException as __e: + + def _wrapper(): + raise __e.wrapped + + __ret = handle_errors(_wrapper, non_wrap_print=False) + s = "" + if __ret == 99: + s += _("\n{err}{stacktrace}").format( + err=__e, stacktrace=traceback_str + ) + + s += _( + "\n\nDespite the error while indexing, the operation " + "has completed successfuly." + ) + error(s) + except api_errors.ReadOnlyFileSystemException as __e: + __ret = EXIT_OOPS + except api_errors.UnexpectedLinkError as __e: + error("\n" + str(__e)) + __ret = EXIT_OOPS + except api_errors.UnrecognizedCatalogPart as __e: + error("\n" + str(__e)) + __ret = EXIT_OOPS + except api_errors.InvalidConfigFile as __e: + error("\n" + str(__e)) + __ret = EXIT_OOPS + except (api_errors.PkgUnicodeDecodeError, UnicodeEncodeError) as __e: + error("\n" + str(__e)) + error( + "the locale environment (LC_ALL, LC_CTYPE) must be" + " a UTF-8 locale or C." + ) + __ret = EXIT_OOPS + except: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_UNKNOWN) + if non_wrap_print: + traceback.print_exc() + error(traceback_str) + __ret = 99 + return __ret def handle_sighupterm(signum, frame): - """Attempt to gracefully handle SIGHUP and SIGTERM by telling the api - to abort and record the cancellation before exiting.""" + """Attempt to gracefully handle SIGHUP and SIGTERM by telling the api + to abort and record the cancellation before exiting.""" - try: - if _api_inst: - _api_inst.abort(result=RESULT_CANCELED) - except: - # If history operation fails for some reason, drive on. - pass + try: + if _api_inst: + _api_inst.abort(result=RESULT_CANCELED) + except: + # If history operation fails for some reason, drive on. + pass - # Use os module to immediately exit (bypasses standard exit handling); - # this is preferred over raising a KeyboardInterupt as whatever module - # we interrupted may not expect that if they disabled SIGINT handling. - os._exit(EXIT_OOPS) + # Use os module to immediately exit (bypasses standard exit handling); + # this is preferred over raising a KeyboardInterupt as whatever module + # we interrupted may not expect that if they disabled SIGINT handling. + os._exit(EXIT_OOPS) if __name__ == "__main__": - misc.setlocale(locale.LC_ALL, "", error) - gettext.install("pkg", "/usr/share/locale") - misc.set_fd_limits(printer=error) - - # Make all warnings be errors. - import warnings - warnings.simplefilter('error') - if six.PY3: - # disable ResourceWarning: unclosed file - warnings.filterwarnings("ignore", category=ResourceWarning) - - # Attempt to handle SIGHUP/SIGTERM gracefully. - import signal - if portable.osname != "windows": - # SIGHUP not supported on windows; will cause exception. - signal.signal(signal.SIGHUP, handle_sighupterm) - signal.signal(signal.SIGTERM, handle_sighupterm) - - __retval = handle_errors(main_func) - if DebugValues["timings"]: - def __display_timings(): - msg(str(pkg_timer)) - handle_errors(__display_timings) - try: - logging.shutdown() - except IOError: - # Ignore python's spurious pipe problems. - pass - sys.exit(__retval) + misc.setlocale(locale.LC_ALL, "", error) + gettext.install("pkg", "/usr/share/locale") + misc.set_fd_limits(printer=error) + + # Make all warnings be errors. 
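`handle_errors()`, reformatted above, is the single choke point that converts API exceptions into process exit codes, reserving 99 for "python barfed an exception" so the test suite can detect unexpected tracebacks. A condensed sketch of the pattern; the exception class and the exit-code table here are illustrative stand-ins, not the client's full set:

```python
import sys
import traceback

EXIT_OK = 0
EXIT_OOPS = 1
EXIT_PYTHON_ERROR = 99      # sentinel: an unexpected exception escaped

class TransportError(Exception):
    pass

def handle_errors(func, *args, **kwargs):
    """Run func and map exceptions to exit codes instead of tracebacks."""
    try:
        return func(*args, **kwargs)
    except KeyboardInterrupt:
        # Stay quiet; printing here can trigger further broken-pipe errors.
        return EXIT_OOPS
    except TransportError as e:
        print("transport error: {0}".format(e), file=sys.stderr)
        return EXIT_OOPS
    except SystemExit:
        raise
    except Exception:
        # Anything unexpected gets the sentinel status plus a traceback.
        traceback.print_exc()
        return EXIT_PYTHON_ERROR

def main_func():
    return EXIT_OK

if __name__ == "__main__":
    sys.exit(handle_errors(main_func))
```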
+ import warnings + + warnings.simplefilter("error") + if six.PY3: + # disable ResourceWarning: unclosed file + warnings.filterwarnings("ignore", category=ResourceWarning) + + # Attempt to handle SIGHUP/SIGTERM gracefully. + import signal + + if portable.osname != "windows": + # SIGHUP not supported on windows; will cause exception. + signal.signal(signal.SIGHUP, handle_sighupterm) + signal.signal(signal.SIGTERM, handle_sighupterm) + + __retval = handle_errors(main_func) + if DebugValues["timings"]: + + def __display_timings(): + msg(str(pkg_timer)) + + handle_errors(__display_timings) + try: + logging.shutdown() + except IOError: + # Ignore python's spurious pipe problems. + pass + sys.exit(__retval) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/depot-config.py b/src/depot-config.py index 545911056..09cb2593b 100755 --- a/src/depot-config.py +++ b/src/depot-config.py @@ -25,7 +25,9 @@ # Copyright 2020 OmniOS Community Edition (OmniOSce) Association. # -import pkg.site_paths; pkg.site_paths.init() +import pkg.site_paths + +pkg.site_paths.init() import errno import getopt import gettext @@ -42,7 +44,18 @@ from mako.template import Template from mako.lookup import TemplateLookup -from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, PKey, load_privatekey, dump_privatekey, load_certificate, dump_certificate, X509, X509Extension, FILETYPE_PEM +from OpenSSL.crypto import ( + TYPE_RSA, + TYPE_DSA, + PKey, + load_privatekey, + dump_privatekey, + load_certificate, + dump_certificate, + X509, + X509Extension, + FILETYPE_PEM, +) import pkg import pkg.client.api_errors as apx @@ -62,15 +75,15 @@ logger = global_settings.logger # exit codes -EXIT_OK = 0 -EXIT_OOPS = 1 -EXIT_BADOPT = 2 +EXIT_OK = 0 +EXIT_OOPS = 1 +EXIT_BADOPT = 2 DEPOT_HTTP_TEMPLATE = "depot_httpd.conf.mako" DEPOT_FRAGMENT_TEMPLATE = "depot.conf.mako" DEPOT_HTTP_FILENAME = "depot_httpd.conf" -DEPOT_FRAGMENT_FILENAME= "depot.conf" +DEPOT_FRAGMENT_FILENAME = "depot.conf" DEPOT_PUB_FILENAME = "index.html" DEPOT_HTDOCS_DIRNAME = "htdocs" @@ -94,50 +107,58 @@ file 1 manifest 0 status 0 -""".format(pkg.VERSION) +""".format( + pkg.VERSION +) # versions response used when we provide search capability DEPOT_VERSIONS_STR = """{0}admin 0 search 0 1 -""".format(DEPOT_FRAGMENT_VERSIONS_STR) +""".format( + DEPOT_FRAGMENT_VERSIONS_STR +) DEPOT_USER = "pkg5srv" DEPOT_GROUP = "pkg5srv" + class DepotException(Exception): - pass + pass def error(text, cmd=None): - """Emit an error message prefixed by the command name """ + """Emit an error message prefixed by the command name""" - if cmd: - text = "{0}: {1}".format(cmd, text) - pkg_cmd = "pkg.depot-config " - else: - pkg_cmd = "pkg.depot-config: " + if cmd: + text = "{0}: {1}".format(cmd, text) + pkg_cmd = "pkg.depot-config " + else: + pkg_cmd = "pkg.depot-config: " - # If we get passed something like an Exception, we can convert - # it down to a string. - text = str(text) + # If we get passed something like an Exception, we can convert + # it down to a string. + text = str(text) - # If the message starts with whitespace, assume that it should come - # *before* the command-name prefix. - text_nows = text.lstrip() - ws = text[:len(text) - len(text_nows)] + # If the message starts with whitespace, assume that it should come + # *before* the command-name prefix. + text_nows = text.lstrip() + ws = text[: len(text) - len(text_nows)] - # This has to be a constant value as we can't reliably get our actual - # program name on all platforms. 
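The `__main__` block of pkg.py, which closes just above before the depot-config.py hunks begin, installs `handle_sighupterm` for SIGHUP/SIGTERM and leaves via `os._exit()` so that code which disabled SIGINT handling is not surprised by a synthetic KeyboardInterrupt. A minimal stand-alone sketch, using `sys.platform` in place of the client's `portable.osname` check and stubbing out the abort/history bookkeeping:

```python
import os
import signal
import sys
import time

EXIT_OOPS = 1

def handle_sighupterm(signum, frame):
    # Record the cancellation if an API instance exists (stubbed here),
    # then exit immediately, bypassing normal interpreter shutdown.
    os._exit(EXIT_OOPS)

if __name__ == "__main__":
    if sys.platform != "win32":
        # SIGHUP does not exist on Windows; installing it would raise.
        signal.signal(signal.SIGHUP, handle_sighupterm)
    signal.signal(signal.SIGTERM, handle_sighupterm)
    time.sleep(60)   # stand-in for real work; send SIGTERM to observe the exit
```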
- logger.error(ws + pkg_cmd + text_nows) + # This has to be a constant value as we can't reliably get our actual + # program name on all platforms. + logger.error(ws + pkg_cmd + text_nows) -def usage(usage_error=None, cmd=None, retcode=EXIT_BADOPT): - """Emit a usage message and optionally prefix it with a more - specific error message. Causes program to exit. - """ - if usage_error: - error(usage_error, cmd=cmd) - msg(_("""\ +def usage(usage_error=None, cmd=None, retcode=EXIT_BADOPT): + """Emit a usage message and optionally prefix it with a more + specific error message. Causes program to exit. + """ + + if usage_error: + error(usage_error, cmd=cmd) + msg( + _( + """\ Usage: pkg.depot-config ( -d repository_dir | -S ) -r runtime_dir [-c cache_dir] [-s cache_size] [-p port] [-h hostname] @@ -148,854 +169,1030 @@ def usage(usage_error=None, cmd=None, retcode=EXIT_BADOPT): --cert-key-dir cert_key_directory ) [ (--ca-cert ca_cert_file --ca-key ca_key_file ) ] [--smf-fmri smf_pkg_depot_fmri] ] ) -""")) - sys.exit(retcode) +""" + ) + ) + sys.exit(retcode) + def _chown_dir(dir): - """Sets ownership for the given directory to pkg5srv:pkg5srv""" + """Sets ownership for the given directory to pkg5srv:pkg5srv""" + + uid = portable.get_user_by_name(DEPOT_USER, None, False) + gid = portable.get_group_by_name(DEPOT_GROUP, None, False) + try: + os.chown(dir, uid, gid) + except OSError as err: + if not os.environ.get("PKG5_TEST_ENV", None): + raise DepotException( + _("Unable to chown {dir} to " "{user}:{group}: {err}").format( + dir=dir, user=DEPOT_USER, group=DEPOT_GROUP, err=err + ) + ) - uid = portable.get_user_by_name(DEPOT_USER, None, False) - gid = portable.get_group_by_name(DEPOT_GROUP, None, False) - try: - os.chown(dir, uid, gid) - except OSError as err: - if not os.environ.get("PKG5_TEST_ENV", None): - raise DepotException(_("Unable to chown {dir} to " - "{user}:{group}: {err}").format( - dir=dir, user=DEPOT_USER, - group=DEPOT_GROUP, err=err)) def _get_publishers(root): - """Given a repository root, return the list of available publishers, - along with the default publisher/prefix.""" - + """Given a repository root, return the list of available publishers, + along with the default publisher/prefix.""" + + try: + # we don't set writable_root, as we don't want to take the hit + # on potentially building an index here. + repository = sr.Repository(root=root, read_only=True) + + if repository.version != 4: + raise DepotException( + _("pkg.depot-config only supports v4 repositories") + ) + except Exception as e: + raise DepotException(e) + + all_pubs = [pub.prefix for pub in repository.get_publishers()] + try: + default_pub = repository.cfg.get_property("publisher", "prefix") + except cfg.UnknownPropertyError: + default_pub = None + return all_pubs, default_pub, repository.get_status() + + +def _write_httpd_conf( + pubs, + default_pubs, + runtime_dir, + log_dir, + template_dir, + cache_dir, + cache_size, + host, + port, + sroot, + fragment=False, + allow_refresh=False, + ssl_cert_file="", + ssl_key_file="", + ssl_cert_chain_file="", +): + """Writes the webserver configuration for the depot. + + pubs repository and publisher information, a list in the form + [(publisher_prefix, repo_dir, repo_prefix, + writable_root), ... ] + default_pubs default publishers, per repository, a list in the form + [(default_publisher_prefix, repo_dir, repo_prefix) ... 
] + + runtime_dir where we write httpd.conf files + log_dir where Apache should write its log files + template_dir where we find our Mako templates + cache_dir where Apache should write its cache and wsgi search idx + cache_size how large our cache can grow + host our hostname, needed to set ServerName properly + port the port on which Apache should listen + sroot the prefix into the server namespace, + ignored if fragment==False + fragment True if we should only write a file to drop into conf.d/ + (i.e. a partial server configuration) + + allow_refresh True if we allow the 'refresh' or 'refresh-indexes' + admin/0 operations + + The URI namespace we create on the web server looks like this: + + ///// + //// + + 'sroot' is only used when the Apache server is serving other content + and we want to separate pkg(7) resources from the other resources + provided. + + 'repo_prefix' exists so that we can disambiguate between multiple + repositories that provide the same publisher. + + 'ssl_cert_file' the location of the server certificate file. + + 'ssl_key_file' the location of the server key file. + + 'ssl_cert_chain_file' the location of the certificate chain file if the + the server certificate is not signed by the top level CA. + """ + + try: + # check our hostname + socket.getaddrinfo(host, None) + + # Apache needs IPv6 addresses wrapped in square brackets + if ":" in host: + host = "[{0}]".format(host) + + # check our directories + dirs = [runtime_dir] + if not fragment: + dirs.append(log_dir) + if cache_dir: + dirs.append(cache_dir) + for dir in dirs + [template_dir]: + if os.path.exists(dir) and not os.path.isdir(dir): + raise DepotException(_("{0} is not a directory").format(dir)) + + for dir in dirs: + misc.makedirs(dir) + + # check our port + if not fragment: + try: + num = int(port) + if num <= 0 or num >= 65535: + raise DepotException(_("invalid port: {0}").format(port)) + except ValueError: + raise DepotException(_("invalid port: {0}").format(port)) + + # check our cache size try: - # we don't set writable_root, as we don't want to take the hit - # on potentially building an index here. - repository = sr.Repository(root=root, read_only=True) - - if repository.version != 4: - raise DepotException( - _("pkg.depot-config only supports v4 repositories")) - except Exception as e: - raise DepotException(e) - - all_pubs = [pub.prefix for pub in repository.get_publishers()] - try: - default_pub = repository.cfg.get_property("publisher", "prefix") - except cfg.UnknownPropertyError: - default_pub = None - return all_pubs, default_pub, repository.get_status() - -def _write_httpd_conf(pubs, default_pubs, runtime_dir, log_dir, template_dir, - cache_dir, cache_size, host, port, sroot, - fragment=False, allow_refresh=False, ssl_cert_file="", - ssl_key_file="", ssl_cert_chain_file=""): - """Writes the webserver configuration for the depot. - - pubs repository and publisher information, a list in the form - [(publisher_prefix, repo_dir, repo_prefix, - writable_root), ... ] - default_pubs default publishers, per repository, a list in the form - [(default_publisher_prefix, repo_dir, repo_prefix) ... 
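`_write_httpd_conf()` validates its inputs before any template is rendered: the hostname must resolve through `socket.getaddrinfo()`, IPv6 literals are wrapped in square brackets for Apache, and the port and cache size must be plausible integers. Roughly, with the bounds mirroring the checks above:

```python
import socket

def check_host_port(host, port):
    """Validate a host/port pair the way the depot config generator does."""
    # getaddrinfo() raises socket.gaierror (or UnicodeError for odd input
    # such as ".") when the name cannot be resolved.
    socket.getaddrinfo(host, None)

    # Apache wants IPv6 literals bracketed, e.g. Listen [::1]:10000
    if ":" in host:
        host = "[{0}]".format(host)

    num = int(port)            # ValueError for non-numeric input
    if num <= 0 or num >= 65535:
        raise ValueError("invalid port: {0}".format(port))
    return host, num

print(check_host_port("localhost", "10000"))   # ('localhost', 10000)
print(check_host_port("::1", "10000"))         # ('[::1]', 10000)
```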
] - - runtime_dir where we write httpd.conf files - log_dir where Apache should write its log files - template_dir where we find our Mako templates - cache_dir where Apache should write its cache and wsgi search idx - cache_size how large our cache can grow - host our hostname, needed to set ServerName properly - port the port on which Apache should listen - sroot the prefix into the server namespace, - ignored if fragment==False - fragment True if we should only write a file to drop into conf.d/ - (i.e. a partial server configuration) - - allow_refresh True if we allow the 'refresh' or 'refresh-indexes' - admin/0 operations - - The URI namespace we create on the web server looks like this: - - ///// - //// - - 'sroot' is only used when the Apache server is serving other content - and we want to separate pkg(7) resources from the other resources - provided. - - 'repo_prefix' exists so that we can disambiguate between multiple - repositories that provide the same publisher. - - 'ssl_cert_file' the location of the server certificate file. - - 'ssl_key_file' the location of the server key file. - - 'ssl_cert_chain_file' the location of the certificate chain file if the - the server certificate is not signed by the top level CA. - """ - - try: - # check our hostname - socket.getaddrinfo(host, None) - - # Apache needs IPv6 addresses wrapped in square brackets - if ":" in host: - host = "[{0}]".format(host) - - # check our directories - dirs = [runtime_dir] - if not fragment: - dirs.append(log_dir) - if cache_dir: - dirs.append(cache_dir) - for dir in dirs + [template_dir]: - if os.path.exists(dir) and not os.path.isdir(dir): - raise DepotException( - _("{0} is not a directory").format(dir)) - - for dir in dirs: - misc.makedirs(dir) - - # check our port - if not fragment: - try: - num = int(port) - if num <= 0 or num >= 65535: - raise DepotException( - _("invalid port: {0}").format(port)) - except ValueError: - raise DepotException( - _("invalid port: {0}").format(port)) - - # check our cache size - try: - num = int(cache_size) - if num < 0: - raise DepotException(_("invalid cache size: " - "{0}").format(num)) - except ValueError: - raise DepotException( - _("invalid cache size: {0}").format(cache_size)) - - httpd_conf_template_path = os.path.join(template_dir, - DEPOT_HTTP_TEMPLATE) - fragment_conf_template_path = os.path.join(template_dir, - DEPOT_FRAGMENT_TEMPLATE) - - conf_lookup = TemplateLookup(directories=[template_dir]) - if fragment: - conf_template = Template( - filename=fragment_conf_template_path, - lookup=conf_lookup) - conf_path = os.path.join(runtime_dir, - DEPOT_FRAGMENT_FILENAME) - else: - conf_template = Template( - filename=httpd_conf_template_path, - lookup=conf_lookup) - conf_path = os.path.join(runtime_dir, - DEPOT_HTTP_FILENAME) - - conf_text = conf_template.render( - pubs=pubs, - default_pubs=default_pubs, - log_dir=log_dir, - cache_dir=cache_dir, - cache_size=cache_size, - runtime_dir=runtime_dir, - template_dir=template_dir, - ipv6_addr="::1", - host=host, - port=port, - sroot=sroot, - allow_refresh=allow_refresh, - ssl_cert_file=ssl_cert_file, - ssl_key_file=ssl_key_file, - ssl_cert_chain_file=ssl_cert_chain_file + num = int(cache_size) + if num < 0: + raise DepotException( + _("invalid cache size: " "{0}").format(num) ) + except ValueError: + raise DepotException( + _("invalid cache size: {0}").format(cache_size) + ) + + httpd_conf_template_path = os.path.join( + template_dir, DEPOT_HTTP_TEMPLATE + ) + fragment_conf_template_path = os.path.join( + template_dir, 
DEPOT_FRAGMENT_TEMPLATE + ) + + conf_lookup = TemplateLookup(directories=[template_dir]) + if fragment: + conf_template = Template( + filename=fragment_conf_template_path, lookup=conf_lookup + ) + conf_path = os.path.join(runtime_dir, DEPOT_FRAGMENT_FILENAME) + else: + conf_template = Template( + filename=httpd_conf_template_path, lookup=conf_lookup + ) + conf_path = os.path.join(runtime_dir, DEPOT_HTTP_FILENAME) + + conf_text = conf_template.render( + pubs=pubs, + default_pubs=default_pubs, + log_dir=log_dir, + cache_dir=cache_dir, + cache_size=cache_size, + runtime_dir=runtime_dir, + template_dir=template_dir, + ipv6_addr="::1", + host=host, + port=port, + sroot=sroot, + allow_refresh=allow_refresh, + ssl_cert_file=ssl_cert_file, + ssl_key_file=ssl_key_file, + ssl_cert_chain_file=ssl_cert_chain_file, + ) + + with open(conf_path, "w") as conf_file: + conf_file.write(conf_text) + + except (socket.gaierror, UnicodeError) as err: + # socket.getaddrinfo raise UnicodeDecodeError in Python 3 + # for some input, such as '.' + raise DepotException( + _("Unable to write Apache configuration: {host}: " "{err}").format( + **locals() + ) + ) + except (OSError, IOError, EnvironmentError, apx.ApiException) as err: + traceback.print_exc() + raise DepotException( + _("Unable to write depot_httpd.conf: {0}").format(err) + ) - with open(conf_path, "w") as conf_file: - conf_file.write(conf_text) - - except (socket.gaierror, UnicodeError) as err: - # socket.getaddrinfo raise UnicodeDecodeError in Python 3 - # for some input, such as '.' - raise DepotException( - _("Unable to write Apache configuration: {host}: " - "{err}").format(**locals())) - except (OSError, IOError, EnvironmentError, apx.ApiException) as err: - traceback.print_exc() - raise DepotException( - _("Unable to write depot_httpd.conf: {0}").format(err)) def _write_versions_response(htdocs_path, fragment=False): - """Writes a static versions/0 response for the Apache depot.""" + """Writes a static versions/0 response for the Apache depot.""" - try: - versions_path = os.path.join(htdocs_path, - *DEPOT_VERSIONS_DIRNAME) - misc.makedirs(versions_path) - - with open(os.path.join(versions_path, "index.html"), "w") as \ - versions_file: - versions_file.write( - fragment and DEPOT_FRAGMENT_VERSIONS_STR or - DEPOT_VERSIONS_STR) - - versions_file.close() - except (OSError, apx.ApiException) as err: - raise DepotException( - _("Unable to write versions response: {0}").format(err)) - -def _write_publisher_response(pubs, htdocs_path, repo_prefix): - """Writes a static publisher/0 response for the depot.""" - try: - # convert our list of strings to a list of Publishers - pub_objs = [pkg.client.publisher.Publisher(pub) for pub in pubs] - - # write individual reponses for the publishers - for pub in pub_objs: - pub_path = os.path.join(htdocs_path, - os.path.sep.join( - [repo_prefix, pub.prefix] + DEPOT_PUB_DIRNAME)) - misc.makedirs(pub_path) - with open(os.path.join(pub_path, "index.html"), "w") as\ - pub_file: - p5i.write(pub_file, [pub]) - - # write a response that contains all publishers - pub_path = os.path.join(htdocs_path, - os.path.sep.join([repo_prefix] + DEPOT_PUB_DIRNAME)) - os.makedirs(pub_path) - with open(os.path.join(pub_path, "index.html"), "w") as \ - pub_file: - p5i.write(pub_file, pub_objs) - - except (OSError, apx.ApiException) as err: - raise DepotException( - _("Unable to write publisher response: {0}").format(err)) - -def _write_status_response(status, htdocs_path, repo_prefix): - """Writes a status status/0 response for the depot.""" - 
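The configuration text itself comes from Mako: the function picks either the full httpd.conf template or the conf.d fragment, renders it with the publisher, cache and SSL parameters, and writes the result into the runtime directory. A toy version of that render-and-write flow; the template text and paths below are invented, since the real templates ship with the depot under `template_dir`:

```python
import os
import tempfile

from mako.lookup import TemplateLookup
from mako.template import Template

# Throwaway template standing in for depot_httpd.conf.mako.
TEMPLATE_TEXT = """\
Listen ${host}:${port}
% for pub, root in pubs:
# publisher ${pub} served from ${root}
% endfor
"""

def write_conf(runtime_dir, host, port, pubs):
    template_dir = os.path.join(runtime_dir, "templates")
    os.makedirs(template_dir, exist_ok=True)
    tmpl_path = os.path.join(template_dir, "depot.conf.mako")
    with open(tmpl_path, "w") as f:
        f.write(TEMPLATE_TEXT)

    # TemplateLookup lets templates include each other by relative name.
    lookup = TemplateLookup(directories=[template_dir])
    tmpl = Template(filename=tmpl_path, lookup=lookup)
    conf_text = tmpl.render(host=host, port=port, pubs=pubs)

    conf_path = os.path.join(runtime_dir, "depot.conf")
    with open(conf_path, "w") as f:
        f.write(conf_text)
    return conf_path

if __name__ == "__main__":
    rundir = tempfile.mkdtemp()
    path = write_conf(rundir, "0.0.0.0", 10000, [("omnios", "/var/pkgrepo")])
    print(open(path).read())
```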
try: - status_path = os.path.join(htdocs_path, repo_prefix, - os.path.sep.join(DEPOT_STATUS_DIRNAME), "index.html") - misc.makedirs(os.path.dirname(status_path)) - with open(status_path, "w") as status_file: - status_file.write(json.dumps(status, ensure_ascii=False, - indent=2, sort_keys=True)) - except OSError as err: - raise DepotException( - _("Unable to write status response: {0}").format(err)) - -def _createCertificateKey(serial, CN, starttime, endtime, - dump_cert_path, dump_key_path, issuerCert=None, issuerKey=None, - key_type=TYPE_RSA, key_bits=1024, digest="sha256"): - """Generate a certificate given a certificate request. + try: + versions_path = os.path.join(htdocs_path, *DEPOT_VERSIONS_DIRNAME) + misc.makedirs(versions_path) - 'serial' is the serial number for the certificate + with open( + os.path.join(versions_path, "index.html"), "w" + ) as versions_file: + versions_file.write( + fragment and DEPOT_FRAGMENT_VERSIONS_STR or DEPOT_VERSIONS_STR + ) - 'CN' is the subject common name of the certificate. + versions_file.close() + except (OSError, apx.ApiException) as err: + raise DepotException( + _("Unable to write versions response: {0}").format(err) + ) - 'starttime' is the timestamp when the certificate starts - being valid. 0 means now. - 'endtime' is the timestamp when the certificate stops being - valid - - 'dump_cert_path' is the file the generated certificate gets dumped. - - 'dump_key_path' is the file the generated key gets dumped. +def _write_publisher_response(pubs, htdocs_path, repo_prefix): + """Writes a static publisher/0 response for the depot.""" + try: + # convert our list of strings to a list of Publishers + pub_objs = [pkg.client.publisher.Publisher(pub) for pub in pubs] + + # write individual reponses for the publishers + for pub in pub_objs: + pub_path = os.path.join( + htdocs_path, + os.path.sep.join([repo_prefix, pub.prefix] + DEPOT_PUB_DIRNAME), + ) + misc.makedirs(pub_path) + with open(os.path.join(pub_path, "index.html"), "w") as pub_file: + p5i.write(pub_file, [pub]) + + # write a response that contains all publishers + pub_path = os.path.join( + htdocs_path, os.path.sep.join([repo_prefix] + DEPOT_PUB_DIRNAME) + ) + os.makedirs(pub_path) + with open(os.path.join(pub_path, "index.html"), "w") as pub_file: + p5i.write(pub_file, pub_objs) + + except (OSError, apx.ApiException) as err: + raise DepotException( + _("Unable to write publisher response: {0}").format(err) + ) - 'issuerCert' is the certificate object of the issuer. - 'issuerKey' is the key object of the issuer. +def _write_status_response(status, htdocs_path, repo_prefix): + """Writes a status status/0 response for the depot.""" + try: + status_path = os.path.join( + htdocs_path, + repo_prefix, + os.path.sep.join(DEPOT_STATUS_DIRNAME), + "index.html", + ) + misc.makedirs(os.path.dirname(status_path)) + with open(status_path, "w") as status_file: + status_file.write( + json.dumps(status, ensure_ascii=False, indent=2, sort_keys=True) + ) + except OSError as err: + raise DepotException( + _("Unable to write status response: {0}").format(err) + ) + + +def _createCertificateKey( + serial, + CN, + starttime, + endtime, + dump_cert_path, + dump_key_path, + issuerCert=None, + issuerKey=None, + key_type=TYPE_RSA, + key_bits=1024, + digest="sha256", +): + """Generate a certificate given a certificate request. + + 'serial' is the serial number for the certificate + + 'CN' is the subject common name of the certificate. + + 'starttime' is the timestamp when the certificate starts + being valid. 0 means now. 
+ + 'endtime' is the timestamp when the certificate stops being + valid + + 'dump_cert_path' is the file the generated certificate gets dumped. + + 'dump_key_path' is the file the generated key gets dumped. + + 'issuerCert' is the certificate object of the issuer. + + 'issuerKey' is the key object of the issuer. + + 'key_type' is the key type. allowed value: TYPE_RSA and TYPE_DSA. + + 'key_bits' is number of bits to use in the key. + + 'digest' is the digestion method to use for signing. + """ + + key = PKey() + key.generate_key(key_type, key_bits) + + cert = X509() + cert.set_serial_number(serial) + cert.gmtime_adj_notBefore(starttime) + cert.gmtime_adj_notAfter(endtime) + + cert.get_subject().C = "US" + cert.get_subject().ST = "California" + cert.get_subject().L = "Santa Clara" + cert.get_subject().O = "pkg5" + + cert.set_pubkey(key) + # If a issuer is specified, set the issuer. otherwise set cert + # itself as a issuer. + if issuerCert: + cert.get_subject().CN = CN + cert.set_issuer(issuerCert.get_subject()) + else: + cert.get_subject().CN = "Depot Test CA" + cert.set_issuer(cert.get_subject()) + + # If there is a issuer key, sign with that key. Otherwise, + # create a self-signed cert. + # Cert requires bytes. + if issuerKey: + cert.add_extensions( + [X509Extension(b"basicConstraints", True, b"CA:FALSE")] + ) + cert.sign(issuerKey, digest) + else: + cert.add_extensions( + [X509Extension(b"basicConstraints", True, b"CA:TRUE")] + ) + cert.sign(key, digest) + with open(dump_cert_path, "wb") as f: + f.write(dump_certificate(FILETYPE_PEM, cert)) + with open(dump_key_path, "wb") as f: + f.write(dump_privatekey(FILETYPE_PEM, key)) + return (cert, key) + + +def _generate_server_cert_key( + host, port, ca_cert_file="", ca_key_file="", output_dir="/tmp" +): + """Generate certificate and key files for https service.""" + if os.path.exists(output_dir): + if not os.path.isdir(output_dir): + raise DepotException(_("{0} is not a directory").format(output_dir)) + else: + misc.makedirs(output_dir) + server_id = "{0}_{1}".format(host, port) + + cs_prefix = "server_{0}".format(server_id) + server_cert_file = os.path.join( + output_dir, "{0}_cert.pem".format(cs_prefix) + ) + server_key_file = os.path.join(output_dir, "{0}_key.pem".format(cs_prefix)) + + # If the cert and key files do not exist, then generate one. + if not os.path.exists(server_cert_file) or not os.path.exists( + server_key_file + ): + # Used as a factor to easily specify a year. + year_factor = 60 * 60 * 24 * 365 + + # If user specifies ca_cert_file and ca_key_file, just load + # the files. Otherwise, generate new ca_cert and ca_key. + if not ca_cert_file or not ca_key_file: + ca_cert_file = os.path.join( + output_dir, "ca_{0}_cert.pem".format(server_id) + ) + ca_key_file = os.path.join( + output_dir, "ca_{0}_key.pem".format(server_id) + ) + ca_cert, ca_key = _createCertificateKey( + 1, host, 0, year_factor * 10, ca_cert_file, ca_key_file + ) + else: + if not os.path.exists(ca_cert_file): + raise DepotException( + _( + "Cannot find user " "provided CA certificate file: {0}" + ).format(ca_cert_file) + ) + if not os.path.exists(ca_key_file): + raise DepotException( + _("Cannot find user " "provided CA key file: {0}").format( + ca_key_file + ) + ) + with open(ca_cert_file, "r") as fr: + ca_cert = load_certificate(FILETYPE_PEM, fr.read()) + with open(ca_key_file, "r") as fr: + ca_key = load_privatekey(FILETYPE_PEM, fr.read()) - 'key_type' is the key type. allowed value: TYPE_RSA and TYPE_DSA. 
+ _createCertificateKey( + 2, + host, + 0, + year_factor * 10, + server_cert_file, + server_key_file, + issuerCert=ca_cert, + issuerKey=ca_key, + ) - 'key_bits' is number of bits to use in the key. + return (ca_cert_file, ca_key_file, server_cert_file, server_key_file) - 'digest' is the digestion method to use for signing. - """ - key = PKey() - key.generate_key(key_type, key_bits) +def cleanup_htdocs(htdocs_dir): + """Destroy any existing "htdocs" directory.""" + try: + shutil.rmtree(htdocs_dir, ignore_errors=True) + except OSError as err: + raise DepotException( + _( + "Unable to remove an existing 'htdocs' directory " + "in the runtime directory: {0}" + ).format(err) + ) + + +def refresh_conf( + repo_info, + log_dir, + host, + port, + runtime_dir, + template_dir, + cache_dir, + cache_size, + sroot, + fragment=False, + allow_refresh=False, + ssl_cert_file="", + ssl_key_file="", + ssl_cert_chain_file="", +): + """Creates a new configuration for the depot.""" + try: + ret = EXIT_OK + if not repo_info: + raise DepotException(_("no repositories found")) - cert = X509() - cert.set_serial_number(serial) - cert.gmtime_adj_notBefore(starttime) - cert.gmtime_adj_notAfter(endtime) + htdocs_path = os.path.join(runtime_dir, DEPOT_HTDOCS_DIRNAME, sroot) + cleanup_htdocs(htdocs_path) + misc.makedirs(htdocs_path) - cert.get_subject().C = "US" - cert.get_subject().ST = "California" - cert.get_subject().L = "Santa Clara" - cert.get_subject().O = "pkg5" + # pubs and default_pubs are lists of tuples of the form: + # (publisher prefix, repository root dir, repository prefix, + # writable_root) + pubs = [] + default_pubs = [] + errors = [] - cert.set_pubkey(key) - # If a issuer is specified, set the issuer. otherwise set cert - # itself as a issuer. - if issuerCert: - cert.get_subject().CN = CN - cert.set_issuer(issuerCert.get_subject()) - else: - cert.get_subject().CN = "Depot Test CA" - cert.set_issuer(cert.get_subject()) - - # If there is a issuer key, sign with that key. Otherwise, - # create a self-signed cert. - # Cert requires bytes. - if issuerKey: - cert.add_extensions([X509Extension(b"basicConstraints", True, - b"CA:FALSE")]) - cert.sign(issuerKey, digest) - else: - cert.add_extensions([X509Extension(b"basicConstraints", True, - b"CA:TRUE")]) - cert.sign(key, digest) - with open(dump_cert_path, "wb") as f: - f.write(dump_certificate(FILETYPE_PEM, cert)) - with open(dump_key_path, "wb") as f: - f.write(dump_privatekey(FILETYPE_PEM, key)) - return (cert, key) - -def _generate_server_cert_key(host, port, ca_cert_file="", ca_key_file="", - output_dir="/tmp"): - """ Generate certificate and key files for https service.""" - if os.path.exists(output_dir): - if not os.path.isdir(output_dir): - raise DepotException( - _("{0} is not a directory").format(output_dir)) + # Query each repository for its publisher information. 
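`_createCertificateKey()` and `_generate_server_cert_key()` above drive pyOpenSSL directly: generate an RSA key, fill in an X509 subject, mark the certificate CA or non-CA through a basicConstraints extension, and sign with either the issuer key or the certificate's own key. A cut-down self-signed-CA example using the same OpenSSL.crypto calls; the subject value and output paths are placeholders:

```python
from OpenSSL.crypto import (
    FILETYPE_PEM,
    PKey,
    TYPE_RSA,
    X509,
    X509Extension,
    dump_certificate,
    dump_privatekey,
)

def make_self_signed_ca(cn, cert_path, key_path, days=3650, bits=2048):
    """Create a self-signed CA certificate/key pair and dump both as PEM."""
    key = PKey()
    key.generate_key(TYPE_RSA, bits)

    cert = X509()
    cert.set_serial_number(1)
    cert.gmtime_adj_notBefore(0)                  # valid from "now"
    cert.gmtime_adj_notAfter(days * 24 * 60 * 60)
    cert.get_subject().CN = cn
    cert.set_issuer(cert.get_subject())           # self-signed: issuer == subject
    cert.set_pubkey(key)
    cert.add_extensions(
        [X509Extension(b"basicConstraints", True, b"CA:TRUE")]
    )
    cert.sign(key, "sha256")

    with open(cert_path, "wb") as f:
        f.write(dump_certificate(FILETYPE_PEM, cert))
    with open(key_path, "wb") as f:
        f.write(dump_privatekey(FILETYPE_PEM, key))
    return cert, key

if __name__ == "__main__":
    make_self_signed_ca("depot-test-ca", "/tmp/ca_cert.pem", "/tmp/ca_key.pem")
```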
+ for repo_root, repo_prefix, writable_root in repo_info: + try: + publishers, default_pub, status = _get_publishers(repo_root) + for pub in publishers: + pubs.append((pub, repo_root, repo_prefix, writable_root)) + default_pubs.append((default_pub, repo_root, repo_prefix)) + _write_status_response(status, htdocs_path, repo_prefix) + # The writable root must exist and must be + # owned by pkg5srv:pkg5srv + if writable_root: + misc.makedirs(writable_root) + _chown_dir(writable_root) + + except DepotException as err: + errors.append(str(err)) + if errors: + raise DepotException( + _("Unable to write configuration: " "{0}").format( + "\n".join(errors) + ) + ) + + # Write the publisher/0 response for each repository + pubs_by_repo = {} + for pub_prefix, repo_root, repo_prefix, writable_root in pubs: + pubs_by_repo.setdefault(repo_prefix, []).append(pub_prefix) + for repo_prefix in pubs_by_repo: + _write_publisher_response( + pubs_by_repo[repo_prefix], htdocs_path, repo_prefix + ) + + _write_httpd_conf( + pubs, + default_pubs, + runtime_dir, + log_dir, + template_dir, + cache_dir, + cache_size, + host, + port, + sroot, + fragment=fragment, + allow_refresh=allow_refresh, + ssl_cert_file=ssl_cert_file, + ssl_key_file=ssl_key_file, + ssl_cert_chain_file=ssl_cert_chain_file, + ) + _write_versions_response(htdocs_path, fragment=fragment) + # If we're writing a configuration fragment, then the web server + # is probably not running as DEPOT_USER:DEPOT_GROUP + if not fragment: + _chown_dir(runtime_dir) + _chown_dir(cache_dir) else: - misc.makedirs(output_dir) - server_id = "{0}_{1}".format(host, port) - - cs_prefix = "server_{0}".format(server_id) - server_cert_file = os.path.join(output_dir, "{0}_cert.pem".format( - cs_prefix)) - server_key_file = os.path.join(output_dir, "{0}_key.pem".format( - cs_prefix)) - - # If the cert and key files do not exist, then generate one. - if not os.path.exists(server_cert_file) or not os.path.exists( - server_key_file): - # Used as a factor to easily specify a year. - year_factor = 60 * 60 * 24 * 365 - - # If user specifies ca_cert_file and ca_key_file, just load - # the files. Otherwise, generate new ca_cert and ca_key. 
- if not ca_cert_file or not ca_key_file: - ca_cert_file = os.path.join(output_dir, - "ca_{0}_cert.pem".format(server_id)) - ca_key_file = os.path.join(output_dir, - "ca_{0}_key.pem".format(server_id)) - ca_cert, ca_key = _createCertificateKey(1, host, - 0, year_factor * 10, ca_cert_file, ca_key_file) - else: - if not os.path.exists(ca_cert_file): - raise DepotException(_("Cannot find user " - "provided CA certificate file: {0}").format( - ca_cert_file)) - if not os.path.exists(ca_key_file): - raise DepotException(_("Cannot find user " - "provided CA key file: {0}").format( - ca_key_file)) - with open(ca_cert_file, "r") as fr: - ca_cert = load_certificate(FILETYPE_PEM, - fr.read()) - with open(ca_key_file, "r") as fr: - ca_key = load_privatekey(FILETYPE_PEM, - fr.read()) - - _createCertificateKey(2, host, 0, year_factor * 10, - server_cert_file, server_key_file, issuerCert=ca_cert, - issuerKey=ca_key) - - return (ca_cert_file, ca_key_file, server_cert_file, server_key_file) + msg(_("Created {0}/depot.conf").format(runtime_dir)) + except (DepotException, OSError, apx.ApiException) as err: + error(err) + ret = EXIT_OOPS + return ret -def cleanup_htdocs(htdocs_dir): - """Destroy any existing "htdocs" directory.""" - try: - shutil.rmtree(htdocs_dir, ignore_errors=True) - except OSError as err: - raise DepotException( - _("Unable to remove an existing 'htdocs' directory " - "in the runtime directory: {0}").format(err)) - -def refresh_conf(repo_info, log_dir, host, port, runtime_dir, - template_dir, cache_dir, cache_size, sroot, fragment=False, - allow_refresh=False, ssl_cert_file="", ssl_key_file="", - ssl_cert_chain_file=""): - """Creates a new configuration for the depot.""" - try: - ret = EXIT_OK - if not repo_info: - raise DepotException(_("no repositories found")) - - htdocs_path = os.path.join(runtime_dir, DEPOT_HTDOCS_DIRNAME, - sroot) - cleanup_htdocs(htdocs_path) - misc.makedirs(htdocs_path) - - # pubs and default_pubs are lists of tuples of the form: - # (publisher prefix, repository root dir, repository prefix, - # writable_root) - pubs = [] - default_pubs = [] - errors = [] - - # Query each repository for its publisher information. 
- for (repo_root, repo_prefix, writable_root) in repo_info: - try: - publishers, default_pub, status = \ - _get_publishers(repo_root) - for pub in publishers: - pubs.append( - (pub, repo_root, - repo_prefix, writable_root)) - default_pubs.append((default_pub, - repo_root, repo_prefix)) - _write_status_response(status, htdocs_path, - repo_prefix) - # The writable root must exist and must be - # owned by pkg5srv:pkg5srv - if writable_root: - misc.makedirs(writable_root) - _chown_dir(writable_root) - - except DepotException as err: - errors.append(str(err)) - if errors: - raise DepotException(_("Unable to write configuration: " - "{0}").format("\n".join(errors))) - - # Write the publisher/0 response for each repository - pubs_by_repo = {} - for pub_prefix, repo_root, repo_prefix, writable_root in pubs: - pubs_by_repo.setdefault(repo_prefix, []).append( - pub_prefix) - for repo_prefix in pubs_by_repo: - _write_publisher_response( - pubs_by_repo[repo_prefix], htdocs_path, repo_prefix) - - _write_httpd_conf(pubs, default_pubs, runtime_dir, log_dir, - template_dir, cache_dir, cache_size, host, port, sroot, - fragment=fragment, allow_refresh=allow_refresh, - ssl_cert_file=ssl_cert_file, ssl_key_file=ssl_key_file, - ssl_cert_chain_file=ssl_cert_chain_file) - _write_versions_response(htdocs_path, fragment=fragment) - # If we're writing a configuration fragment, then the web server - # is probably not running as DEPOT_USER:DEPOT_GROUP - if not fragment: - _chown_dir(runtime_dir) - _chown_dir(cache_dir) - else: - msg(_("Created {0}/depot.conf").format(runtime_dir)) - except (DepotException, OSError, apx.ApiException) as err: - error(err) - ret = EXIT_OOPS - return ret def get_smf_repo_info(): - """Return a list of repo_info from the online instances of pkg/server - which are marked as pkg/standalone = False and pkg/readonly = True.""" - - smf_instances = smf.check_fmris(None, "{0}:*".format(PKG_SERVER_SVC)) - repo_info = [] - for fmri in smf_instances: - repo_prefix = fmri.split(":")[-1] - repo_root = smf.get_prop(fmri, "pkg/inst_root") - writable_root = smf.get_prop(fmri, "pkg/writable_root") - if not writable_root or writable_root == '""': - writable_root = None - state = smf.get_prop(fmri, "restarter/state") - readonly = smf.get_prop(fmri, "pkg/readonly") - standalone = smf.get_prop(fmri, "pkg/standalone") - - if (state == "online" and - readonly == "true" and - standalone == "false"): - repo_info.append((repo_root, - _affix_slash(repo_prefix), writable_root)) - if not repo_info: - raise DepotException(_( - "No online, readonly, non-standalone instances of " - "{0} found.").format(PKG_SERVER_SVC)) - return repo_info + """Return a list of repo_info from the online instances of pkg/server + which are marked as pkg/standalone = False and pkg/readonly = True.""" + + smf_instances = smf.check_fmris(None, "{0}:*".format(PKG_SERVER_SVC)) + repo_info = [] + for fmri in smf_instances: + repo_prefix = fmri.split(":")[-1] + repo_root = smf.get_prop(fmri, "pkg/inst_root") + writable_root = smf.get_prop(fmri, "pkg/writable_root") + if not writable_root or writable_root == '""': + writable_root = None + state = smf.get_prop(fmri, "restarter/state") + readonly = smf.get_prop(fmri, "pkg/readonly") + standalone = smf.get_prop(fmri, "pkg/standalone") + + if state == "online" and readonly == "true" and standalone == "false": + repo_info.append( + (repo_root, _affix_slash(repo_prefix), writable_root) + ) + if not repo_info: + raise DepotException( + _( + "No online, readonly, non-standalone instances of " "{0} found." 
+ ).format(PKG_SERVER_SVC) + ) + return repo_info + def _check_unique_repo_properties(repo_info): - """Determine whether the repository root, and supplied prefixes are - unique. The prefixes allow two or more repositories that both contain - the same publisher to be differentiated in the Apache configuration, so - that requests are routed to the correct repository.""" - - prefixes = set() - roots = set() - writable_roots = set() - errors = [] - for root, prefix, writable_root in repo_info: - if prefix in prefixes: - errors.append(_("prefix {0} cannot be used more than " - "once in a given depot configuration").format( - prefix)) - prefixes.add(prefix) - if root in roots: - errors.append(_("repo_root {0} cannot be used more " - "than once in a given depot configuration").format( - root)) - roots.add(root) - if writable_root and writable_root in writable_roots: - errors.append(_("writable_root {0} cannot be used more " - "than once in a given depot configuration").format( - writable_root)) - writable_roots.add(writable_root) - if errors: - raise DepotException("\n".join(errors)) - return True + """Determine whether the repository root, and supplied prefixes are + unique. The prefixes allow two or more repositories that both contain + the same publisher to be differentiated in the Apache configuration, so + that requests are routed to the correct repository.""" + + prefixes = set() + roots = set() + writable_roots = set() + errors = [] + for root, prefix, writable_root in repo_info: + if prefix in prefixes: + errors.append( + _( + "prefix {0} cannot be used more than " + "once in a given depot configuration" + ).format(prefix) + ) + prefixes.add(prefix) + if root in roots: + errors.append( + _( + "repo_root {0} cannot be used more " + "than once in a given depot configuration" + ).format(root) + ) + roots.add(root) + if writable_root and writable_root in writable_roots: + errors.append( + _( + "writable_root {0} cannot be used more " + "than once in a given depot configuration" + ).format(writable_root) + ) + writable_roots.add(writable_root) + if errors: + raise DepotException("\n".join(errors)) + return True + def _affix_slash(str): - val = str.lstrip("/").rstrip("/") - if "/" in str: - raise DepotException(_("cannot use '/' chars in prefixes")) - # An RE that matches valid SMF instance names works for prefixes - if not re.match(r"^([A-Za-z][_A-Za-z0-9.-]*,)?[A-Za-z][_A-Za-z0-9-]*$", - str): - raise DepotException(_("%s is not a valid prefix")) - return "{0}/".format(val) + val = str.lstrip("/").rstrip("/") + if "/" in str: + raise DepotException(_("cannot use '/' chars in prefixes")) + # An RE that matches valid SMF instance names works for prefixes + if not re.match( + r"^([A-Za-z][_A-Za-z0-9.-]*,)?[A-Za-z][_A-Za-z0-9-]*$", str + ): + raise DepotException(_("%s is not a valid prefix")) + return "{0}/".format(val) + def _update_smf_props(smf_fmri, prop_list, orig, dest): - """Update the smf props after the new prop values are generated.""" - - smf_instances = smf.check_fmris(None, smf_fmri) - for fmri in smf_instances: - refresh = False - for i in range(len(prop_list)): - if orig[i] != dest[i]: - smf.set_prop(fmri, prop_list[i], dest[i]) - refresh = True - if refresh: - smf.refresh(fmri) + """Update the smf props after the new prop values are generated.""" -def main_func(): + smf_instances = smf.check_fmris(None, smf_fmri) + for fmri in smf_instances: + refresh = False + for i in range(len(prop_list)): + if orig[i] != dest[i]: + smf.set_prop(fmri, prop_list[i], dest[i]) + refresh = True + 
if refresh: + smf.refresh(fmri) - # some sensible defaults - host = "0.0.0.0" - # the port we listen on - port = None - # a list of (repo_dir, repo_prefix) tuples - repo_info = [] - # the path where we store disk caches - cache_dir = None - # our maximum cache size, in megabytes - cache_size = 0 - # whether we're writing a full httpd.conf, or just a fragment - fragment = False - # Whether we support https service. - https = False - # The location of server certificate file. - ssl_cert_file = "" - # The location of server key file. - ssl_key_file = "" - # The location of the server ca certificate file. - ssl_ca_cert_file = "" - # The location of the server ca key file. - ssl_ca_key_file = "" - # Directory for storing generated certificates and keys - cert_key_dir = "" - # SSL certificate chain file path if the server certificate is not - # signed by the top level CA. - ssl_cert_chain_file = "" - # The pkg/depot smf instance fmri. - smf_fmri = "" - # an optional url-prefix, used to separate pkg5 services from the rest - # of the webserver url namespace, only used when running in fragment - # mode, otherwise we assume we're the only service running on this - # web server instance, and control the entire server URL namespace. - sroot = "" - # the path where our Mako templates and wsgi scripts are stored - template_dir = "/etc/pkg/depot" - # a volatile directory used at runtime for storing state - runtime_dir = None - # where logs are written - log_dir = "/var/log/pkg/depot" - # whether we should pull configuration from - # svc:/application/pkg/server instances - use_smf_instances = False - # whether we allow admin/0 operations to rebuild the index - allow_refresh = False - # the current server_type - server_type = "apache2" - - writable_root_set = False - try: - opts, pargs = getopt.getopt(sys.argv[1:], - "Ac:d:Fh:l:P:p:r:Ss:t:T:?", ["help", "debug=", "https", - "cert=", "key=", "ca-cert=", "ca-key=", "cert-chain=", - "cert-key-dir=", "smf-fmri="]) - for opt, arg in opts: - if opt == "--help": - usage() - elif opt == "-h": - host = arg - elif opt == "-c": - cache_dir = arg - elif opt == "-s": - cache_size = arg - elif opt == "-l": - log_dir = arg - elif opt == "-p": - port = arg - elif opt == "-r": - runtime_dir = arg - elif opt == "-T": - template_dir = arg - elif opt == "-t": - server_type = arg - elif opt == "-d": - if "=" not in arg: - usage(_("-d arguments must be in the " - "form =" - "[=writable root]")) - components = arg.split("=", 2) - if len(components) == 3: - prefix, root, writable_root = components - writable_root_set = True - elif len(components) == 2: - prefix, root = components - writable_root = None - repo_info.append((root, _affix_slash(prefix), - writable_root)) - elif opt == "-P": - sroot = _affix_slash(arg) - elif opt == "-F": - fragment = True - elif opt == "-S": - use_smf_instances = True - elif opt == "-A": - allow_refresh = True - elif opt == "--https": - https = True - elif opt == "--cert": - ssl_cert_file = arg - elif opt == "--key": - ssl_key_file = arg - elif opt == "--ca-cert": - ssl_ca_cert_file = arg - elif opt == "--ca-key": - ssl_ca_key_file = arg - elif opt == "--cert-chain": - ssl_cert_chain_file = arg - elif opt == "--cert-key-dir": - cert_key_dir = arg - elif opt == "--smf-fmri": - smf_fmri = arg - elif opt == "--debug": - try: - key, value = arg.split("=", 1) - except (AttributeError, ValueError): - usage( - _("{opt} takes argument of form " - "name=value, not {arg}").format( - opt=opt, arg=arg)) - DebugValues.set_value(key, value) - else: - 
usage("unknown option {0}".format(opt)) - - except getopt.GetoptError as e: - usage(_("illegal global option -- {0}").format(e.opt)) - - if not runtime_dir: - usage(_("required runtime dir option -r missing.")) - - # we need a cache_dir to store the SSLSessionCache - if not cache_dir and not fragment: - usage(_("cache_dir option -c is required if -F is not used.")) - - if not fragment and not port: - usage(_("required port option -p missing.")) - - if not use_smf_instances and not repo_info: - usage(_("at least one -d option is required if -S is " - "not used.")) - - if repo_info and use_smf_instances: - usage(_("cannot use -d and -S together.")) - - if https: - if fragment: - usage(_("https configuration is not supported in " - "fragment mode.")) - if bool(ssl_cert_file) != bool(ssl_key_file): - usage(_("certificate and key files must be presented " - "at the same time.")) - elif not ssl_cert_file and not ssl_key_file: - if not cert_key_dir: - usage(_("cert-key-dir option is require to " - "store the generated certificates and keys")) - if ssl_cert_chain_file: - usage(_("Cannot use --cert-chain without " - "--cert and --key")) - if bool(ssl_ca_cert_file) != bool(ssl_ca_key_file): - usage(_("server CA certificate and key files " - "must be presented at the same time.")) - # If fmri is specified for pkg/depot instance, we need - # record the proporty values for updating. - if smf_fmri: - orig = (ssl_ca_cert_file, ssl_ca_key_file, - ssl_cert_file, ssl_key_file) - try: - ssl_ca_cert_file, ssl_ca_key_file, ssl_cert_file, \ - ssl_key_file = \ - _generate_server_cert_key(host, port, - ca_cert_file=ssl_ca_cert_file, - ca_key_file=ssl_ca_key_file, - output_dir=cert_key_dir) - if ssl_ca_cert_file: - msg(_("Server CA certificate is " - "located at {0}. Please deploy it " - "into /etc/certs/CA directory of " - "each client.").format( - ssl_ca_cert_file)) - except (DepotException, EnvironmentError) as e: - error(e) - return EXIT_OOPS - - # Update the pkg/depot instance smf properties if - # anything changes. - if smf_fmri: - dest = (ssl_ca_cert_file, ssl_ca_key_file, - ssl_cert_file, ssl_key_file) - if orig != dest: - prop_list = ["config/ssl_ca_cert_file", - "config/ssl_ca_key_file", - "config/ssl_cert_file", - "config/ssl_key_file"] - try: - _update_smf_props(smf_fmri, prop_list, - orig, dest) - except (smf.NonzeroExitException, - RuntimeError) as e: - error(e) - return EXIT_OOPS - else: - if not os.path.exists(ssl_cert_file): - error(_("User provided server certificate " - "file {0} does not exist.").format( - ssl_cert_file)) - return EXIT_OOPS - if not os.path.exists(ssl_key_file): - error(_("User provided server key file {0} " - "does not exist.").format(ssl_key_file)) - return EXIT_OOPS - if ssl_cert_chain_file and not os.path.exists( - ssl_cert_chain_file): - error(_("User provided certificate chain file " - "{0} does not exist.").format( - ssl_cert_chain_file)) - return EXIT_OOPS - else: - if ssl_cert_file or ssl_key_file or ssl_ca_cert_file \ - or ssl_ca_key_file or ssl_cert_chain_file: - usage(_("certificate or key files are given before " - "https service is turned on. Use --https to turn " - "on the service.")) - if smf_fmri: - usage(_("cannot use --smf-fmri without --https.")) - - # We can't support httpd.conf fragments with writable root, because - # we don't have the mod_wsgi app that can build the index or serve - # search requests everywhere the fragments might be used. (eg. 
on - # non-Solaris systems) - if writable_root_set and fragment: - usage(_("cannot use -d with writable roots and -F together.")) - - if fragment and port: - usage(_("cannot use -F and -p together.")) - - if fragment and allow_refresh: - usage(_("cannot use -F and -A together.")) - - if sroot and not fragment: - usage(_("cannot use -P without -F.")) - - if use_smf_instances: + +def main_func(): + # some sensible defaults + host = "0.0.0.0" + # the port we listen on + port = None + # a list of (repo_dir, repo_prefix) tuples + repo_info = [] + # the path where we store disk caches + cache_dir = None + # our maximum cache size, in megabytes + cache_size = 0 + # whether we're writing a full httpd.conf, or just a fragment + fragment = False + # Whether we support https service. + https = False + # The location of server certificate file. + ssl_cert_file = "" + # The location of server key file. + ssl_key_file = "" + # The location of the server ca certificate file. + ssl_ca_cert_file = "" + # The location of the server ca key file. + ssl_ca_key_file = "" + # Directory for storing generated certificates and keys + cert_key_dir = "" + # SSL certificate chain file path if the server certificate is not + # signed by the top level CA. + ssl_cert_chain_file = "" + # The pkg/depot smf instance fmri. + smf_fmri = "" + # an optional url-prefix, used to separate pkg5 services from the rest + # of the webserver url namespace, only used when running in fragment + # mode, otherwise we assume we're the only service running on this + # web server instance, and control the entire server URL namespace. + sroot = "" + # the path where our Mako templates and wsgi scripts are stored + template_dir = "/etc/pkg/depot" + # a volatile directory used at runtime for storing state + runtime_dir = None + # where logs are written + log_dir = "/var/log/pkg/depot" + # whether we should pull configuration from + # svc:/application/pkg/server instances + use_smf_instances = False + # whether we allow admin/0 operations to rebuild the index + allow_refresh = False + # the current server_type + server_type = "apache2" + + writable_root_set = False + try: + opts, pargs = getopt.getopt( + sys.argv[1:], + "Ac:d:Fh:l:P:p:r:Ss:t:T:?", + [ + "help", + "debug=", + "https", + "cert=", + "key=", + "ca-cert=", + "ca-key=", + "cert-chain=", + "cert-key-dir=", + "smf-fmri=", + ], + ) + for opt, arg in opts: + if opt == "--help": + usage() + elif opt == "-h": + host = arg + elif opt == "-c": + cache_dir = arg + elif opt == "-s": + cache_size = arg + elif opt == "-l": + log_dir = arg + elif opt == "-p": + port = arg + elif opt == "-r": + runtime_dir = arg + elif opt == "-T": + template_dir = arg + elif opt == "-t": + server_type = arg + elif opt == "-d": + if "=" not in arg: + usage( + _( + "-d arguments must be in the " + "form =" + "[=writable root]" + ) + ) + components = arg.split("=", 2) + if len(components) == 3: + prefix, root, writable_root = components + writable_root_set = True + elif len(components) == 2: + prefix, root = components + writable_root = None + repo_info.append((root, _affix_slash(prefix), writable_root)) + elif opt == "-P": + sroot = _affix_slash(arg) + elif opt == "-F": + fragment = True + elif opt == "-S": + use_smf_instances = True + elif opt == "-A": + allow_refresh = True + elif opt == "--https": + https = True + elif opt == "--cert": + ssl_cert_file = arg + elif opt == "--key": + ssl_key_file = arg + elif opt == "--ca-cert": + ssl_ca_cert_file = arg + elif opt == "--ca-key": + ssl_ca_key_file = arg + elif opt == 
"--cert-chain": + ssl_cert_chain_file = arg + elif opt == "--cert-key-dir": + cert_key_dir = arg + elif opt == "--smf-fmri": + smf_fmri = arg + elif opt == "--debug": try: - repo_info = get_smf_repo_info() - except DepotException as e: + key, value = arg.split("=", 1) + except (AttributeError, ValueError): + usage( + _( + "{opt} takes argument of form " + "name=value, not {arg}" + ).format(opt=opt, arg=arg) + ) + DebugValues.set_value(key, value) + else: + usage("unknown option {0}".format(opt)) + + except getopt.GetoptError as e: + usage(_("illegal global option -- {0}").format(e.opt)) + + if not runtime_dir: + usage(_("required runtime dir option -r missing.")) + + # we need a cache_dir to store the SSLSessionCache + if not cache_dir and not fragment: + usage(_("cache_dir option -c is required if -F is not used.")) + + if not fragment and not port: + usage(_("required port option -p missing.")) + + if not use_smf_instances and not repo_info: + usage(_("at least one -d option is required if -S is " "not used.")) + + if repo_info and use_smf_instances: + usage(_("cannot use -d and -S together.")) + + if https: + if fragment: + usage( + _("https configuration is not supported in " "fragment mode.") + ) + if bool(ssl_cert_file) != bool(ssl_key_file): + usage( + _( + "certificate and key files must be presented " + "at the same time." + ) + ) + elif not ssl_cert_file and not ssl_key_file: + if not cert_key_dir: + usage( + _( + "cert-key-dir option is require to " + "store the generated certificates and keys" + ) + ) + if ssl_cert_chain_file: + usage(_("Cannot use --cert-chain without " "--cert and --key")) + if bool(ssl_ca_cert_file) != bool(ssl_ca_key_file): + usage( + _( + "server CA certificate and key files " + "must be presented at the same time." + ) + ) + # If fmri is specified for pkg/depot instance, we need + # record the proporty values for updating. + if smf_fmri: + orig = ( + ssl_ca_cert_file, + ssl_ca_key_file, + ssl_cert_file, + ssl_key_file, + ) + try: + ( + ssl_ca_cert_file, + ssl_ca_key_file, + ssl_cert_file, + ssl_key_file, + ) = _generate_server_cert_key( + host, + port, + ca_cert_file=ssl_ca_cert_file, + ca_key_file=ssl_ca_key_file, + output_dir=cert_key_dir, + ) + if ssl_ca_cert_file: + msg( + _( + "Server CA certificate is " + "located at {0}. Please deploy it " + "into /etc/certs/CA directory of " + "each client." + ).format(ssl_ca_cert_file) + ) + except (DepotException, EnvironmentError) as e: + error(e) + return EXIT_OOPS + + # Update the pkg/depot instance smf properties if + # anything changes. + if smf_fmri: + dest = ( + ssl_ca_cert_file, + ssl_ca_key_file, + ssl_cert_file, + ssl_key_file, + ) + if orig != dest: + prop_list = [ + "config/ssl_ca_cert_file", + "config/ssl_ca_key_file", + "config/ssl_cert_file", + "config/ssl_key_file", + ] + try: + _update_smf_props(smf_fmri, prop_list, orig, dest) + except (smf.NonzeroExitException, RuntimeError) as e: error(e) + return EXIT_OOPS + else: + if not os.path.exists(ssl_cert_file): + error( + _( + "User provided server certificate " + "file {0} does not exist." + ).format(ssl_cert_file) + ) + return EXIT_OOPS + if not os.path.exists(ssl_key_file): + error( + _( + "User provided server key file {0} " "does not exist." + ).format(ssl_key_file) + ) + return EXIT_OOPS + if ssl_cert_chain_file and not os.path.exists(ssl_cert_chain_file): + error( + _( + "User provided certificate chain file " + "{0} does not exist." 
+ ).format(ssl_cert_chain_file) + ) + return EXIT_OOPS + else: + if ( + ssl_cert_file + or ssl_key_file + or ssl_ca_cert_file + or ssl_ca_key_file + or ssl_cert_chain_file + ): + usage( + _( + "certificate or key files are given before " + "https service is turned on. Use --https to turn " + "on the service." + ) + ) + if smf_fmri: + usage(_("cannot use --smf-fmri without --https.")) + + # We can't support httpd.conf fragments with writable root, because + # we don't have the mod_wsgi app that can build the index or serve + # search requests everywhere the fragments might be used. (eg. on + # non-Solaris systems) + if writable_root_set and fragment: + usage(_("cannot use -d with writable roots and -F together.")) + + if fragment and port: + usage(_("cannot use -F and -p together.")) - # In the future we may produce configuration for different - # HTTP servers. For now, we only support "apache2" - if server_type not in KNOWN_SERVER_TYPES: - usage(_("unknown server type {type}. " - "Known types are: {known}").format( - type=server_type, - known=", ".join(KNOWN_SERVER_TYPES))) + if fragment and allow_refresh: + usage(_("cannot use -F and -A together.")) + if sroot and not fragment: + usage(_("cannot use -P without -F.")) + + if use_smf_instances: try: - _check_unique_repo_properties(repo_info) + repo_info = get_smf_repo_info() except DepotException as e: - error(e) + error(e) + + # In the future we may produce configuration for different + # HTTP servers. For now, we only support "apache2" + if server_type not in KNOWN_SERVER_TYPES: + usage( + _("unknown server type {type}. " "Known types are: {known}").format( + type=server_type, known=", ".join(KNOWN_SERVER_TYPES) + ) + ) + + try: + _check_unique_repo_properties(repo_info) + except DepotException as e: + error(e) + + ret = refresh_conf( + repo_info, + log_dir, + host, + port, + runtime_dir, + template_dir, + cache_dir, + cache_size, + sroot, + fragment=fragment, + allow_refresh=allow_refresh, + ssl_cert_file=ssl_cert_file, + ssl_key_file=ssl_key_file, + ssl_cert_chain_file=ssl_cert_chain_file, + ) + return ret - ret = refresh_conf(repo_info, log_dir, host, port, runtime_dir, - template_dir, cache_dir, cache_size, sroot, fragment=fragment, - allow_refresh=allow_refresh, ssl_cert_file=ssl_cert_file, - ssl_key_file=ssl_key_file, ssl_cert_chain_file=ssl_cert_chain_file) - return ret # # Establish a specific exit status which means: "python barfed an exception" # so that we can more easily detect these in testing of the CLI commands. # def handle_errors(func, *args, **kwargs): - """Catch exceptions raised by the main program function and then print - a message and/or exit with an appropriate return code. - """ + """Catch exceptions raised by the main program function and then print + a message and/or exit with an appropriate return code. + """ - traceback_str = misc.get_traceback_message() + traceback_str = misc.get_traceback_message() + try: + # Out of memory errors can be raised as EnvironmentErrors with + # an errno of ENOMEM, so in order to handle those exceptions + # with other errnos, we nest this try block and have the outer + # one handle the other instances. try: - # Out of memory errors can be raised as EnvironmentErrors with - # an errno of ENOMEM, so in order to handle those exceptions - # with other errnos, we nest this try block and have the outer - # one handle the other instances. 
- try: - __ret = func(*args, **kwargs) - except (MemoryError, EnvironmentError) as __e: - if isinstance(__e, EnvironmentError) and \ - __e.errno != errno.ENOMEM: - raise - error("\n" + misc.out_of_memory()) - __ret = EXIT_OOPS - except SystemExit as __e: - raise __e - except (PipeError, KeyboardInterrupt): - # Don't display any messages here to prevent possible further - # broken pipe (EPIPE) errors. - __ret = EXIT_OOPS - except: - traceback.print_exc() - error(traceback_str) - __ret = 99 - return __ret + __ret = func(*args, **kwargs) + except (MemoryError, EnvironmentError) as __e: + if isinstance(__e, EnvironmentError) and __e.errno != errno.ENOMEM: + raise + error("\n" + misc.out_of_memory()) + __ret = EXIT_OOPS + except SystemExit as __e: + raise __e + except (PipeError, KeyboardInterrupt): + # Don't display any messages here to prevent possible further + # broken pipe (EPIPE) errors. + __ret = EXIT_OOPS + except: + traceback.print_exc() + error(traceback_str) + __ret = 99 + return __ret if __name__ == "__main__": - misc.setlocale(locale.LC_ALL, "", error) - gettext.install("pkg", "/usr/share/locale") - - # Make all warnings be errors. - warnings.simplefilter('error') - if six.PY3: - # disable ResourceWarning: unclosed file - warnings.filterwarnings("ignore", category=ResourceWarning) - - __retval = handle_errors(main_func) - try: - logging.shutdown() - except IOError: - # Ignore python's spurious pipe problems. - pass - sys.exit(__retval) + misc.setlocale(locale.LC_ALL, "", error) + gettext.install("pkg", "/usr/share/locale") + + # Make all warnings be errors. + warnings.simplefilter("error") + if six.PY3: + # disable ResourceWarning: unclosed file + warnings.filterwarnings("ignore", category=ResourceWarning) + + __retval = handle_errors(main_func) + try: + logging.shutdown() + except IOError: + # Ignore python's spurious pipe problems. + pass + sys.exit(__retval) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/depot.py b/src/depot.py index 4555665c5..836c6406e 100755 --- a/src/depot.py +++ b/src/depot.py @@ -23,7 +23,9 @@ # from __future__ import print_function -import pkg.site_paths; pkg.site_paths.init() +import pkg.site_paths + +pkg.site_paths.init() # pkg.depotd - package repository daemon @@ -84,15 +86,18 @@ from six.moves.urllib.parse import urlparse, urlunparse try: - import cherrypy - version = cherrypy.__version__.split('.') - # comparison requires same type, therefore list conversion is needed - if list(map(int, version)) < [3, 1, 0]: - raise ImportError + import cherrypy + + version = cherrypy.__version__.split(".") + # comparison requires same type, therefore list conversion is needed + if list(map(int, version)) < [3, 1, 0]: + raise ImportError except ImportError: - print("""cherrypy 3.1.0 or greater is required to use this program.""", - file=sys.stderr) - sys.exit(2) + print( + """cherrypy 3.1.0 or greater is required to use this program.""", + file=sys.stderr, + ) + sys.exit(2) import cherrypy.process.servers from cherrypy.process.plugins import Daemonizer @@ -115,41 +120,43 @@ # converting the hyphen symbol. 
punc = string.punctuation.replace("-", "_") if six.PY2: - translate = string.maketrans(punc, "_" * len(string.punctuation)) + translate = string.maketrans(punc, "_" * len(string.punctuation)) else: - translate = str.maketrans(punc, "_" * len(string.punctuation)) + translate = str.maketrans(punc, "_" * len(string.punctuation)) + + class Pkg5Dispatcher(Dispatcher): - def __init__(self, **args): - Dispatcher.__init__(self, translate=translate) + def __init__(self, **args): + Dispatcher.__init__(self, translate=translate) class LogSink(object): - """This is a dummy object that we can use to discard log entries - without relying on non-portable interfaces such as /dev/null.""" + """This is a dummy object that we can use to discard log entries + without relying on non-portable interfaces such as /dev/null.""" - def write(self, *args, **kwargs): - """Discard the bits.""" - pass + def write(self, *args, **kwargs): + """Discard the bits.""" + pass - def flush(self, *args, **kwargs): - """Discard the bits.""" - pass + def flush(self, *args, **kwargs): + """Discard the bits.""" + pass def usage(text=None, retcode=2, full=False): - """Optionally emit a usage message and then exit using the specified - exit code.""" + """Optionally emit a usage message and then exit using the specified + exit code.""" - if text: - emsg(text) + if text: + emsg(text) - if not full: - # The full usage message isn't desired. - emsg(_("Try `pkg.depotd --help or -?' for more " - "information.")) - sys.exit(retcode) + if not full: + # The full usage message isn't desired. + emsg(_("Try `pkg.depotd --help or -?' for more " "information.")) + sys.exit(retcode) - print("""\ + print( + """\ Usage: /usr/lib/pkg.depotd [-a address] [-d inst_root] [-p port] [-s threads] [-t socket_timeout] [--cfg] [--content-root] [--disable-ops op[/1][,...]] [--debug feature_list] @@ -244,745 +251,811 @@ def usage(text=None, retcode=2, full=False): Environment: PKG_REPO Used as default inst_root if -d not provided. PKG_DEPOT_CONTENT Used as default content_root if --content-root - not provided.""") - sys.exit(retcode) - -class OptionError(Exception): - """Option exception. """ - - def __init__(self, *args): - Exception.__init__(self, *args) + not provided.""" + ) + sys.exit(retcode) - @cherrypy.tools.register('before_finalize', priority=60) - def secureheaders(): - headers = cherrypy.response.headers - headers['X-Frame-Options'] = 'SAMEORIGIN' - headers['X-XSS-Protection'] = '1; mode=block' - headers['Content-Security-Policy'] = "default-src 'self';" -if __name__ == "__main__": +class OptionError(Exception): + """Option exception.""" - setlocale(locale.LC_ALL, "") - gettext.install("pkg", "/usr/share/locale") + def __init__(self, *args): + Exception.__init__(self, *args) - add_content = False - exit_ready = False - rebuild = False - reindex = False - nasty = False + @cherrypy.tools.register("before_finalize", priority=60) + def secureheaders(): + headers = cherrypy.response.headers + headers["X-Frame-Options"] = "SAMEORIGIN" + headers["X-XSS-Protection"] = "1; mode=block" + headers["Content-Security-Policy"] = "default-src 'self';" - # Track initial configuration values. - ivalues = { "pkg": {}, "nasty": {} } - if "PKG_REPO" in os.environ: - ivalues["pkg"]["inst_root"] = os.environ["PKG_REPO"] +if __name__ == "__main__": + setlocale(locale.LC_ALL, "") + gettext.install("pkg", "/usr/share/locale") + + add_content = False + exit_ready = False + rebuild = False + reindex = False + nasty = False + + # Track initial configuration values. 
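+ # ivalues collects the initial section/property overrides keyed by config
+ # section, e.g. ivalues["pkg"]["port"] = "10000" (illustrative value); it is
+ # later passed to ds.DepotConfig() as the overrides argument.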
+ ivalues = {"pkg": {}, "nasty": {}} + if "PKG_REPO" in os.environ: + ivalues["pkg"]["inst_root"] = os.environ["PKG_REPO"] + + try: + content_root = os.environ["PKG_DEPOT_CONTENT"] + ivalues["pkg"]["content_root"] = content_root + except KeyError: try: - content_root = os.environ["PKG_DEPOT_CONTENT"] - ivalues["pkg"]["content_root"] = content_root + content_root = os.path.join(os.environ["PKG_HOME"], "share/lib/pkg") + ivalues["pkg"]["content_root"] = content_root except KeyError: - try: - content_root = os.path.join(os.environ['PKG_HOME'], - 'share/lib/pkg') - ivalues["pkg"]["content_root"] = content_root - except KeyError: - pass - - opt = None - addresses = set() - debug_features = [] - disable_ops = [] - repo_props = {} - socket_path = "" - user_cfg = None - try: - long_opts = ["add-content", "cfg=", "cfg-file=", - "content-root=", "debug=", "disable-ops=", "exit-ready", - "help", "image-root=", "log-access=", "log-errors=", - "llmirror", "mirror", "nasty=", "nasty-sleep=", - "proxy-base=", "readonly", "rebuild", "refresh-index", - "set-property=", "ssl-cert-file=", "ssl-dialog=", - "ssl-key-file=", "sort-file-max-size=", "writable-root="] - - opts, pargs = getopt.getopt(sys.argv[1:], "a:d:np:s:t:?", - long_opts) - - show_usage = False - for opt, arg in opts: - if opt == "-a": - addresses.add(arg) - elif opt == "-n": - sys.exit(0) - elif opt == "-d": - ivalues["pkg"]["inst_root"] = arg - elif opt == "-p": - ivalues["pkg"]["port"] = arg - elif opt == "-s": - threads = int(arg) - if threads < THREADS_MIN: - raise OptionError( - "minimum value is {0:d}".format( - THREADS_MIN)) - if threads > THREADS_MAX: - raise OptionError( - "maximum value is {0:d}".format( - THREADS_MAX)) - ivalues["pkg"]["threads"] = threads - elif opt == "-t": - ivalues["pkg"]["socket_timeout"] = arg - elif opt == "--add-content": - add_content = True - elif opt == "--cfg": - user_cfg = arg - elif opt == "--cfg-file": - ivalues["pkg"]["cfg_file"] = arg - elif opt == "--content-root": - ivalues["pkg"]["content_root"] = arg - elif opt == "--debug": - if arg is None or arg == "": - continue - - # A list of features can be specified using a - # "," or any whitespace character as separators. - if "," in arg: - features = arg.split(",") - else: - features = arg.split() - debug_features.extend(features) - - # We also allow key=value debug flags, which - # get set in pkg.client.debugvalues - for feature in features: - try: - key, val = feature.split("=", 1) - DebugValues.set_value(key, val) - except (AttributeError, ValueError): - pass - - elif opt == "--disable-ops": - if arg is None or arg == "": - raise OptionError( - "An argument must be specified.") - - disableops = arg.split(",") - for s in disableops: - if "/" in s: - op, ver = s.rsplit("/", 1) - else: - op = s - ver = "*" - - if op not in \ - ds.DepotHTTP.REPO_OPS_DEFAULT: - raise OptionError( - "Invalid operation " - "'{0}'.".format(s)) - disable_ops.append(s) - elif opt == "--exit-ready": - exit_ready = True - elif opt == "--image-root": - ivalues["pkg"]["image_root"] = arg - elif opt.startswith("--log-"): - prop = "log_{0}".format(opt.lstrip("--log-")) - ivalues["pkg"][prop] = arg - elif opt in ("--help", "-?"): - show_usage = True - elif opt == "--mirror": - ivalues["pkg"]["mirror"] = True - elif opt == "--llmirror": - ivalues["pkg"]["mirror"] = True - ivalues["pkg"]["ll_mirror"] = True - ivalues["pkg"]["readonly"] = True - elif opt == "--nasty": - # ValueError is caught by caller. 
- nasty_value = int(arg) - if (nasty_value > 100 or nasty_value < 1): - raise OptionError("Invalid value " - "for nasty option.\n Please " - "choose a value between 1 and 100.") - nasty = True - ivalues["nasty"]["nasty_level"] = nasty_value - elif opt == "--nasty-sleep": - # ValueError is caught by caller. - sleep_value = int(arg) - ivalues["nasty"]["nasty_sleep"] = sleep_value - elif opt == "--proxy-base": - # Attempt to decompose the url provided into - # its base parts. This is done so we can - # remove any scheme information since we - # don't need it. - scheme, netloc, path, params, query, \ - fragment = urlparse(arg, - "http", allow_fragments=0) - - if not netloc: - raise OptionError("Unable to " - "determine the hostname from " - "the provided URL; please use a " - "fully qualified URL.") - - scheme = scheme.lower() - if scheme not in ("http", "https"): - raise OptionError("Invalid URL; http " - "and https are the only supported " - "schemes.") - - # Rebuild the url with the sanitized components. - ivalues["pkg"]["proxy_base"] = \ - urlunparse((scheme, netloc, path, - params, query, fragment)) - elif opt == "--readonly": - ivalues["pkg"]["readonly"] = True - elif opt == "--rebuild": - rebuild = True - elif opt == "--refresh-index": - # Note: This argument is for internal use - # only. - # - # This flag is purposefully omitted in usage. - # The supported way to forcefully reindex is to - # kill any pkg.depot using that directory, - # remove the index directory, and restart the - # pkg.depot process. The index will be rebuilt - # automatically on startup. - reindex = True - exit_ready = True - elif opt == "--set-property": - try: - prop, p_value = arg.split("=", 1) - p_sec, p_name = prop.split(".", 1) - except ValueError: - usage(_("property arguments must be of " - "the form '=" - "'.")) - repo_props.setdefault(p_sec, {}) - repo_props[p_sec][p_name] = p_value - elif opt == "--ssl-cert-file": - if arg == "none" or arg == "": - # Assume this is an override to clear - # the value. - arg = "" - elif not os.path.isabs(arg): - raise OptionError("The path to " - "the Certificate file must be " - "absolute.") - elif not os.path.exists(arg): - raise OptionError("The specified " - "file does not exist.") - elif not os.path.isfile(arg): - raise OptionError("The specified " - "pathname is not a file.") - ivalues["pkg"]["ssl_cert_file"] = arg - elif opt == "--ssl-key-file": - if arg == "none" or arg == "": - # Assume this is an override to clear - # the value. - arg = "" - elif not os.path.isabs(arg): - raise OptionError("The path to " - "the Private Key file must be " - "absolute.") - elif not os.path.exists(arg): - raise OptionError("The specified " - "file does not exist.") - elif not os.path.isfile(arg): - raise OptionError("The specified " - "pathname is not a file.") - ivalues["pkg"]["ssl_key_file"] = arg - elif opt == "--ssl-dialog": - if arg != "builtin" and \ - arg != "smf" and not \ - arg.startswith("exec:/") and not \ - arg.startswith("svc:"): - raise OptionError("Invalid value " - "specified. Expected: builtin, " - "exec:/path/to/program, smf, or " - "an SMF FMRI.") - - if arg.startswith("exec:"): - if os_util.get_canonical_os_type() != \ - "unix": - # Don't allow a somewhat - # insecure authentication method - # on some platforms. 
- raise OptionError("exec is " - "not a supported dialog " - "type for this operating " - "system.") - - f = os.path.abspath(arg.split( - "exec:")[1]) - if not os.path.isfile(f): - raise OptionError("Invalid " - "file path specified for " - "exec.") - ivalues["pkg"]["ssl_dialog"] = arg - elif opt == "--sort-file-max-size": - ivalues["pkg"]["sort_file_max_size"] = arg - elif opt == "--writable-root": - ivalues["pkg"]["writable_root"] = arg - - # Set accumulated values. - if debug_features: - ivalues["pkg"]["debug"] = debug_features - if disable_ops: - ivalues["pkg"]["disable_ops"] = disable_ops - if addresses: - ivalues["pkg"]["address"] = list(addresses) - - if DebugValues: - reload(pkg.digest) - - # Build configuration object. - dconf = ds.DepotConfig(target=user_cfg, overrides=ivalues) - except getopt.GetoptError as _e: - usage("pkg.depotd: {0}".format(_e.msg)) - except api_errors.ApiException as _e: - usage("pkg.depotd: {0}".format(str(_e))) - except OptionError as _e: - usage("pkg.depotd: option: {0} -- {1}".format(opt, _e)) - except (ArithmeticError, ValueError): - usage("pkg.depotd: illegal option value: {0} specified " \ - "for option: {1}".format(arg, opt)) - - if show_usage: - usage(retcode=0, full=True) - - if not dconf.get_property("pkg", "log_errors"): - dconf.set_property("pkg", "log_errors", "stderr") - - # If stdout is a tty, then send access output there by default instead - # of discarding it. - if not dconf.get_property("pkg", "log_access"): - if os.isatty(sys.stdout.fileno()): - dconf.set_property("pkg", "log_access", "stdout") + pass + + opt = None + addresses = set() + debug_features = [] + disable_ops = [] + repo_props = {} + socket_path = "" + user_cfg = None + try: + long_opts = [ + "add-content", + "cfg=", + "cfg-file=", + "content-root=", + "debug=", + "disable-ops=", + "exit-ready", + "help", + "image-root=", + "log-access=", + "log-errors=", + "llmirror", + "mirror", + "nasty=", + "nasty-sleep=", + "proxy-base=", + "readonly", + "rebuild", + "refresh-index", + "set-property=", + "ssl-cert-file=", + "ssl-dialog=", + "ssl-key-file=", + "sort-file-max-size=", + "writable-root=", + ] + + opts, pargs = getopt.getopt(sys.argv[1:], "a:d:np:s:t:?", long_opts) + + show_usage = False + for opt, arg in opts: + if opt == "-a": + addresses.add(arg) + elif opt == "-n": + sys.exit(0) + elif opt == "-d": + ivalues["pkg"]["inst_root"] = arg + elif opt == "-p": + ivalues["pkg"]["port"] = arg + elif opt == "-s": + threads = int(arg) + if threads < THREADS_MIN: + raise OptionError( + "minimum value is {0:d}".format(THREADS_MIN) + ) + if threads > THREADS_MAX: + raise OptionError( + "maximum value is {0:d}".format(THREADS_MAX) + ) + ivalues["pkg"]["threads"] = threads + elif opt == "-t": + ivalues["pkg"]["socket_timeout"] = arg + elif opt == "--add-content": + add_content = True + elif opt == "--cfg": + user_cfg = arg + elif opt == "--cfg-file": + ivalues["pkg"]["cfg_file"] = arg + elif opt == "--content-root": + ivalues["pkg"]["content_root"] = arg + elif opt == "--debug": + if arg is None or arg == "": + continue + + # A list of features can be specified using a + # "," or any whitespace character as separators. + if "," in arg: + features = arg.split(",") else: - dconf.set_property("pkg", "log_access", "none") - - # Check for invalid option combinations. 
- image_root = dconf.get_property("pkg", "image_root") - inst_root = dconf.get_property("pkg", "inst_root") - mirror = dconf.get_property("pkg", "mirror") - ll_mirror = dconf.get_property("pkg", "ll_mirror") - readonly = dconf.get_property("pkg", "readonly") - writable_root = dconf.get_property("pkg", "writable_root") - if rebuild and add_content: - usage("--add-content cannot be used with --rebuild") - if rebuild and reindex: - usage("--refresh-index cannot be used with --rebuild") - if (rebuild or add_content) and (readonly or mirror): - usage("--readonly and --mirror cannot be used with --rebuild " - "or --add-content") - if reindex and mirror: - usage("--mirror cannot be used with --refresh-index") - if reindex and readonly and not writable_root: - usage("--readonly can only be used with --refresh-index if " - "--writable-root is used") - if image_root and not ll_mirror: - usage("--image-root can only be used with --llmirror.") - if image_root and writable_root: - usage("--image_root and --writable-root cannot be used " - "together.") - if image_root and inst_root: - usage("--image-root and -d cannot be used together.") - - # If the image format changes this may need to be reexamined. - if image_root: - inst_root = os.path.join(image_root, "var", "pkg") - - # Set any values using defaults if they weren't provided. - - # Only use the first value for now; multiple bind addresses may be - # supported later. - address = dconf.get_property("pkg", "address") - if address: - address = address[0] - elif not address: - dconf.set_property("pkg", "address", [HOST_DEFAULT]) - address = dconf.get_property("pkg", "address")[0] - - if not inst_root: - usage("Either PKG_REPO or -d must be provided") + features = arg.split() + debug_features.extend(features) + + # We also allow key=value debug flags, which + # get set in pkg.client.debugvalues + for feature in features: + try: + key, val = feature.split("=", 1) + DebugValues.set_value(key, val) + except (AttributeError, ValueError): + pass + elif opt == "--disable-ops": + if arg is None or arg == "": + raise OptionError("An argument must be specified.") + + disableops = arg.split(",") + for s in disableops: + if "/" in s: + op, ver = s.rsplit("/", 1) + else: + op = s + ver = "*" + + if op not in ds.DepotHTTP.REPO_OPS_DEFAULT: + raise OptionError( + "Invalid operation " "'{0}'.".format(s) + ) + disable_ops.append(s) + elif opt == "--exit-ready": + exit_ready = True + elif opt == "--image-root": + ivalues["pkg"]["image_root"] = arg + elif opt.startswith("--log-"): + prop = "log_{0}".format(opt.lstrip("--log-")) + ivalues["pkg"][prop] = arg + elif opt in ("--help", "-?"): + show_usage = True + elif opt == "--mirror": + ivalues["pkg"]["mirror"] = True + elif opt == "--llmirror": + ivalues["pkg"]["mirror"] = True + ivalues["pkg"]["ll_mirror"] = True + ivalues["pkg"]["readonly"] = True + elif opt == "--nasty": + # ValueError is caught by caller. + nasty_value = int(arg) + if nasty_value > 100 or nasty_value < 1: + raise OptionError( + "Invalid value " + "for nasty option.\n Please " + "choose a value between 1 and 100." + ) + nasty = True + ivalues["nasty"]["nasty_level"] = nasty_value + elif opt == "--nasty-sleep": + # ValueError is caught by caller. + sleep_value = int(arg) + ivalues["nasty"]["nasty_sleep"] = sleep_value + elif opt == "--proxy-base": + # Attempt to decompose the url provided into + # its base parts. This is done so we can + # remove any scheme information since we + # don't need it. 
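+ # For example (illustrative), --proxy-base http://pkgs.example.com/solaris
+ # splits into scheme "http", netloc "pkgs.example.com" and path "/solaris";
+ # the sanitized pieces are reassembled with urlunparse() just below.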
+ scheme, netloc, path, params, query, fragment = urlparse( + arg, "http", allow_fragments=0 + ) + + if not netloc: + raise OptionError( + "Unable to " + "determine the hostname from " + "the provided URL; please use a " + "fully qualified URL." + ) + + scheme = scheme.lower() + if scheme not in ("http", "https"): + raise OptionError( + "Invalid URL; http " + "and https are the only supported " + "schemes." + ) + + # Rebuild the url with the sanitized components. + ivalues["pkg"]["proxy_base"] = urlunparse( + (scheme, netloc, path, params, query, fragment) + ) + elif opt == "--readonly": + ivalues["pkg"]["readonly"] = True + elif opt == "--rebuild": + rebuild = True + elif opt == "--refresh-index": + # Note: This argument is for internal use + # only. + # + # This flag is purposefully omitted in usage. + # The supported way to forcefully reindex is to + # kill any pkg.depot using that directory, + # remove the index directory, and restart the + # pkg.depot process. The index will be rebuilt + # automatically on startup. + reindex = True + exit_ready = True + elif opt == "--set-property": + try: + prop, p_value = arg.split("=", 1) + p_sec, p_name = prop.split(".", 1) + except ValueError: + usage( + _( + "property arguments must be of " + "the form '=" + "'." + ) + ) + repo_props.setdefault(p_sec, {}) + repo_props[p_sec][p_name] = p_value + elif opt == "--ssl-cert-file": + if arg == "none" or arg == "": + # Assume this is an override to clear + # the value. + arg = "" + elif not os.path.isabs(arg): + raise OptionError( + "The path to " + "the Certificate file must be " + "absolute." + ) + elif not os.path.exists(arg): + raise OptionError("The specified " "file does not exist.") + elif not os.path.isfile(arg): + raise OptionError( + "The specified " "pathname is not a file." + ) + ivalues["pkg"]["ssl_cert_file"] = arg + elif opt == "--ssl-key-file": + if arg == "none" or arg == "": + # Assume this is an override to clear + # the value. + arg = "" + elif not os.path.isabs(arg): + raise OptionError( + "The path to " + "the Private Key file must be " + "absolute." + ) + elif not os.path.exists(arg): + raise OptionError("The specified " "file does not exist.") + elif not os.path.isfile(arg): + raise OptionError( + "The specified " "pathname is not a file." + ) + ivalues["pkg"]["ssl_key_file"] = arg + elif opt == "--ssl-dialog": + if ( + arg != "builtin" + and arg != "smf" + and not arg.startswith("exec:/") + and not arg.startswith("svc:") + ): + raise OptionError( + "Invalid value " + "specified. Expected: builtin, " + "exec:/path/to/program, smf, or " + "an SMF FMRI." + ) + + if arg.startswith("exec:"): + if os_util.get_canonical_os_type() != "unix": + # Don't allow a somewhat + # insecure authentication method + # on some platforms. + raise OptionError( + "exec is " + "not a supported dialog " + "type for this operating " + "system." + ) + + f = os.path.abspath(arg.split("exec:")[1]) + if not os.path.isfile(f): + raise OptionError( + "Invalid " "file path specified for " "exec." + ) + ivalues["pkg"]["ssl_dialog"] = arg + elif opt == "--sort-file-max-size": + ivalues["pkg"]["sort_file_max_size"] = arg + elif opt == "--writable-root": + ivalues["pkg"]["writable_root"] = arg + + # Set accumulated values. + if debug_features: + ivalues["pkg"]["debug"] = debug_features + if disable_ops: + ivalues["pkg"]["disable_ops"] = disable_ops + if addresses: + ivalues["pkg"]["address"] = list(addresses) + + if DebugValues: + reload(pkg.digest) + + # Build configuration object. 
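+ # The overrides gathered in ivalues are layered on top of any --cfg target,
+ # so values given on the command line are expected to take precedence over
+ # stored configuration (inferred from the overrides keyword).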
+ dconf = ds.DepotConfig(target=user_cfg, overrides=ivalues) + except getopt.GetoptError as _e: + usage("pkg.depotd: {0}".format(_e.msg)) + except api_errors.ApiException as _e: + usage("pkg.depotd: {0}".format(str(_e))) + except OptionError as _e: + usage("pkg.depotd: option: {0} -- {1}".format(opt, _e)) + except (ArithmeticError, ValueError): + usage( + "pkg.depotd: illegal option value: {0} specified " + "for option: {1}".format(arg, opt) + ) + + if show_usage: + usage(retcode=0, full=True) + + if not dconf.get_property("pkg", "log_errors"): + dconf.set_property("pkg", "log_errors", "stderr") + + # If stdout is a tty, then send access output there by default instead + # of discarding it. + if not dconf.get_property("pkg", "log_access"): + if os.isatty(sys.stdout.fileno()): + dconf.set_property("pkg", "log_access", "stdout") + else: + dconf.set_property("pkg", "log_access", "none") + + # Check for invalid option combinations. + image_root = dconf.get_property("pkg", "image_root") + inst_root = dconf.get_property("pkg", "inst_root") + mirror = dconf.get_property("pkg", "mirror") + ll_mirror = dconf.get_property("pkg", "ll_mirror") + readonly = dconf.get_property("pkg", "readonly") + writable_root = dconf.get_property("pkg", "writable_root") + if rebuild and add_content: + usage("--add-content cannot be used with --rebuild") + if rebuild and reindex: + usage("--refresh-index cannot be used with --rebuild") + if (rebuild or add_content) and (readonly or mirror): + usage( + "--readonly and --mirror cannot be used with --rebuild " + "or --add-content" + ) + if reindex and mirror: + usage("--mirror cannot be used with --refresh-index") + if reindex and readonly and not writable_root: + usage( + "--readonly can only be used with --refresh-index if " + "--writable-root is used" + ) + if image_root and not ll_mirror: + usage("--image-root can only be used with --llmirror.") + if image_root and writable_root: + usage("--image_root and --writable-root cannot be used " "together.") + if image_root and inst_root: + usage("--image-root and -d cannot be used together.") + + # If the image format changes this may need to be reexamined. + if image_root: + inst_root = os.path.join(image_root, "var", "pkg") + + # Set any values using defaults if they weren't provided. + + # Only use the first value for now; multiple bind addresses may be + # supported later. + address = dconf.get_property("pkg", "address") + if address: + address = address[0] + elif not address: + dconf.set_property("pkg", "address", [HOST_DEFAULT]) + address = dconf.get_property("pkg", "address")[0] + + if not inst_root: + usage("Either PKG_REPO or -d must be provided") + + content_root = dconf.get_property("pkg", "content_root") + if not content_root: + dconf.set_property("pkg", "content_root", CONTENT_PATH_DEFAULT) content_root = dconf.get_property("pkg", "content_root") - if not content_root: - dconf.set_property("pkg", "content_root", CONTENT_PATH_DEFAULT) - content_root = dconf.get_property("pkg", "content_root") + port = dconf.get_property("pkg", "port") + ssl_cert_file = dconf.get_property("pkg", "ssl_cert_file") + ssl_key_file = dconf.get_property("pkg", "ssl_key_file") + if (ssl_cert_file and not ssl_key_file) or ( + ssl_key_file and not ssl_cert_file + ): + usage( + "The --ssl-cert-file and --ssl-key-file options must " + "must both be provided when using either option." 
+ ) + elif not port: + if ssl_cert_file and ssl_key_file: + dconf.set_property("pkg", "port", SSL_PORT_DEFAULT) + else: + dconf.set_property("pkg", "port", PORT_DEFAULT) port = dconf.get_property("pkg", "port") - ssl_cert_file = dconf.get_property("pkg", "ssl_cert_file") - ssl_key_file = dconf.get_property("pkg", "ssl_key_file") - if (ssl_cert_file and not ssl_key_file) or (ssl_key_file and not - ssl_cert_file): - usage("The --ssl-cert-file and --ssl-key-file options must " - "must both be provided when using either option.") - elif not port: - if ssl_cert_file and ssl_key_file: - dconf.set_property("pkg", "port", SSL_PORT_DEFAULT) - else: - dconf.set_property("pkg", "port", PORT_DEFAULT) - port = dconf.get_property("pkg", "port") + socket_timeout = dconf.get_property("pkg", "socket_timeout") + if not socket_timeout: + dconf.set_property("pkg", "socket_timeout", SOCKET_TIMEOUT_DEFAULT) socket_timeout = dconf.get_property("pkg", "socket_timeout") - if not socket_timeout: - dconf.set_property("pkg", "socket_timeout", - SOCKET_TIMEOUT_DEFAULT) - socket_timeout = dconf.get_property("pkg", "socket_timeout") + threads = dconf.get_property("pkg", "threads") + if not threads: + dconf.set_property("pkg", "threads", THREADS_DEFAULT) threads = dconf.get_property("pkg", "threads") - if not threads: - dconf.set_property("pkg", "threads", THREADS_DEFAULT) - threads = dconf.get_property("pkg", "threads") - - # If the program is going to reindex, the port is irrelevant since - # the program will not bind to a port. - if not exit_ready: - try: - portend.Checker().assert_free(address, port) - except Exception as e: - emsg("pkg.depotd: unable to bind to the specified " - "port: {0:d}. Reason: {1}".format(port, e)) - sys.exit(1) - else: - # Not applicable if we're not going to serve content - dconf.set_property("pkg", "content_root", "") - - # Any relative paths should be made absolute using pkg_root. 'pkg_root' - # is a special property that was added to enable internal deployment of - # multiple disparate versions of the pkg.depotd software. - pkg_root = dconf.get_property("pkg", "pkg_root") - - repo_config_file = dconf.get_property("pkg", "cfg_file") - if repo_config_file and not os.path.isabs(repo_config_file): - repo_config_file = os.path.join(pkg_root, repo_config_file) - - if content_root and not os.path.isabs(content_root): - content_root = os.path.join(pkg_root, content_root) - - if inst_root and not os.path.isabs(inst_root): - inst_root = os.path.join(pkg_root, inst_root) - - if ssl_cert_file: - if ssl_cert_file == "none": - ssl_cert_file = None - elif not os.path.isabs(ssl_cert_file): - ssl_cert_file = os.path.join(pkg_root, ssl_cert_file) - - if ssl_key_file: - if ssl_key_file == "none": - ssl_key_file = None - elif not os.path.isabs(ssl_key_file): - ssl_key_file = os.path.join(pkg_root, ssl_key_file) - - if writable_root and not os.path.isabs(writable_root): - writable_root = os.path.join(pkg_root, writable_root) - - # Setup SSL if requested. 
- key_data = None - ssl_dialog = dconf.get_property("pkg", "ssl_dialog") - if not exit_ready and ssl_cert_file and ssl_key_file and \ - ssl_dialog != "builtin": - cmdline = None - def get_ssl_passphrase(*ignored): - p = None - try: - if cmdline: - cmdargs = shlex.split(cmdline) - else: - cmdargs = [] - p = subprocess.Popen(cmdargs, - stdout=subprocess.PIPE, - stderr=None) - p.wait() - except Exception as __e: - emsg("pkg.depotd: an error occurred while " - "executing [{0}]; unable to obtain the " - "passphrase needed to decrypt the SSL " - "private key file: {1}".format(cmdline, - __e)) - sys.exit(1) - return p.stdout.read().strip(b"\n") - - if ssl_dialog.startswith("exec:"): - exec_path = ssl_dialog.split("exec:")[1] - if not os.path.isabs(exec_path): - exec_path = os.path.join(pkg_root, exec_path) - cmdline = "{0} {1} {2:d}".format(exec_path, "''", port) - elif ssl_dialog == "smf" or ssl_dialog.startswith("svc:"): - if ssl_dialog == "smf": - # Assume the configuration target was an SMF - # FMRI and let svcprop fail with an error if - # it wasn't. - svc_fmri = dconf.target - else: - svc_fmri = ssl_dialog - cmdline = "/usr/bin/svcprop -p " \ - "pkg_secure/ssl_key_passphrase {0}".format(svc_fmri) - - # The key file requires decryption, but the user has requested - # exec-based authentication, so it will have to be decoded first - # to an un-named temporary file. - try: - with open(ssl_key_file, "rb") as key_file: - pkey = crypto.load_privatekey( - crypto.FILETYPE_PEM, key_file.read(), - get_ssl_passphrase) - - key_data = tempfile.NamedTemporaryFile(dir=pkg_root, - delete=True) - key_data.write(crypto.dump_privatekey( - crypto.FILETYPE_PEM, pkey)) - key_data.seek(0) - except EnvironmentError as _e: - emsg("pkg.depotd: unable to read the SSL private key " - "file: {0}".format(_e)) - sys.exit(1) - except crypto.Error as _e: - emsg("pkg.depotd: authentication or cryptography " - "failure while attempting to decode\nthe SSL " - "private key file: {0}".format(_e)) - sys.exit(1) - else: - # Redirect the server to the decrypted key file. - ssl_key_file = key_data.name - - # Setup our global configuration. - gconf = { - "checker.on": True, - "environment": "production", - "log.screen": False, - "server.max_request_body_size": MAX_REQUEST_BODY_SIZE, - "server.shutdown_timeout": 0, - "server.socket_host": address, - "server.socket_port": port, - "server.socket_timeout": socket_timeout, - "server.ssl_certificate": ssl_cert_file, - "server.ssl_private_key": ssl_key_file, - "server.thread_pool": threads, - "tools.log_headers.on": True, - "tools.encode.on": True, - "tools.encode.encoding": "utf-8", - "tools.secureheaders.on" : True, - } - - if "headers" in dconf.get_property("pkg", "debug"): - # Despite its name, this only logs headers when there is an - # error; it's redundant with the debug feature enabled. - gconf["tools.log_headers.on"] = False - - # Causes the headers of every request to be logged to the error - # log; even if an exception occurs. - gconf["tools.log_headers_always.on"] = True - cherrypy.tools.log_headers_always = cherrypy.Tool( - "on_start_resource", - cherrypy.lib.cptools.log_request_headers) - - log_cfg = { - "access": dconf.get_property("pkg", "log_access"), - "errors": dconf.get_property("pkg", "log_errors") - } - - # If stdin is not a tty and the pkgdepot controller isn't being used, - # then assume process will be daemonized and redirect output. 
- if not os.environ.get("PKGDEPOT_CONTROLLER") and \ - not os.isatty(sys.stdin.fileno()): - # Ensure log handlers are setup to use the file descriptors for - # stdout and stderr as the Daemonizer (used for test suite and - # SMF service) requires this. - if log_cfg["access"] == "stdout": - log_cfg["access"] = "/dev/fd/{0:d}".format( - sys.stdout.fileno()) - elif log_cfg["access"] == "stderr": - log_cfg["access"] = "/dev/fd/{0:d}".format( - sys.stderr.fileno()) - elif log_cfg["access"] == "none": - log_cfg["access"] = "/dev/null" - - if log_cfg["errors"] == "stderr": - log_cfg["errors"] = "/dev/fd/{0:d}".format( - sys.stderr.fileno()) - elif log_cfg["errors"] == "stdout": - log_cfg["errors"] = "/dev/fd/{0:d}".format( - sys.stdout.fileno()) - elif log_cfg["errors"] == "none": - log_cfg["errors"] = "/dev/null" - - log_type_map = { - "errors": { - "param": "log.error_file", - "attr": "error_log" - }, - "access": { - "param": "log.access_file", - "attr": "access_log" - } - } - - for log_type in log_type_map: - dest = log_cfg[log_type] - if dest in ("stdout", "stderr", "none"): - if dest == "none": - h = logging.StreamHandler(LogSink()) - else: - h = logging.StreamHandler(eval("sys.{0}".format( - dest))) - - h.setLevel(logging.DEBUG) - h.setFormatter(cherrypy._cplogging.logfmt) - log_obj = eval("cherrypy.log.{0}".format( - log_type_map[log_type]["attr"])) - log_obj.addHandler(h) - # Since we've replaced cherrypy's log handler with our - # own, we don't want the output directed to a file. - dest = "" - elif dest: - if not os.path.isabs(dest): - dest = os.path.join(pkg_root, dest) - gconf[log_type_map[log_type]["param"]] = dest - - cherrypy.config.update(gconf) - - # Now that our logging, etc. has been setup, it's safe to perform any - # remaining preparation. - - # Initialize repository state. - if not readonly: - # Not readonly, so assume a new repository should be created. - try: - sr.repository_create(inst_root, properties=repo_props) - except sr.RepositoryExistsError: - # Already exists, nothing to do. - pass - except (api_errors.ApiException, sr.RepositoryError) as _e: - emsg("pkg.depotd: {0}".format(_e)) - sys.exit(1) + # If the program is going to reindex, the port is irrelevant since + # the program will not bind to a port. + if not exit_ready: try: - sort_file_max_size = dconf.get_property("pkg", - "sort_file_max_size") - - repo = sr.Repository(cfgpathname=repo_config_file, - log_obj=cherrypy, mirror=mirror, properties=repo_props, - read_only=readonly, root=inst_root, - sort_file_max_size=sort_file_max_size, - writable_root=writable_root) - except (RuntimeError, sr.RepositoryError) as _e: - emsg("pkg.depotd: {0}".format(_e)) - sys.exit(1) - except search_errors.IndexingException as _e: - emsg("pkg.depotd: {0}".format(str(_e)), "INDEX") - sys.exit(1) - except api_errors.ApiException as _e: - emsg("pkg.depotd: {0}".format(str(_e))) + portend.Checker().assert_free(address, port) + except Exception as e: + emsg( + "pkg.depotd: unable to bind to the specified " + "port: {0:d}. Reason: {1}".format(port, e) + ) + sys.exit(1) + else: + # Not applicable if we're not going to serve content + dconf.set_property("pkg", "content_root", "") + + # Any relative paths should be made absolute using pkg_root. 'pkg_root' + # is a special property that was added to enable internal deployment of + # multiple disparate versions of the pkg.depotd software. 
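+ # Every relative path handled below (cfg_file, content_root, inst_root, the
+ # SSL files and writable_root) is anchored at pkg_root before use.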
+ pkg_root = dconf.get_property("pkg", "pkg_root") + + repo_config_file = dconf.get_property("pkg", "cfg_file") + if repo_config_file and not os.path.isabs(repo_config_file): + repo_config_file = os.path.join(pkg_root, repo_config_file) + + if content_root and not os.path.isabs(content_root): + content_root = os.path.join(pkg_root, content_root) + + if inst_root and not os.path.isabs(inst_root): + inst_root = os.path.join(pkg_root, inst_root) + + if ssl_cert_file: + if ssl_cert_file == "none": + ssl_cert_file = None + elif not os.path.isabs(ssl_cert_file): + ssl_cert_file = os.path.join(pkg_root, ssl_cert_file) + + if ssl_key_file: + if ssl_key_file == "none": + ssl_key_file = None + elif not os.path.isabs(ssl_key_file): + ssl_key_file = os.path.join(pkg_root, ssl_key_file) + + if writable_root and not os.path.isabs(writable_root): + writable_root = os.path.join(pkg_root, writable_root) + + # Setup SSL if requested. + key_data = None + ssl_dialog = dconf.get_property("pkg", "ssl_dialog") + if ( + not exit_ready + and ssl_cert_file + and ssl_key_file + and ssl_dialog != "builtin" + ): + cmdline = None + + def get_ssl_passphrase(*ignored): + p = None + try: + if cmdline: + cmdargs = shlex.split(cmdline) + else: + cmdargs = [] + p = subprocess.Popen( + cmdargs, stdout=subprocess.PIPE, stderr=None + ) + p.wait() + except Exception as __e: + emsg( + "pkg.depotd: an error occurred while " + "executing [{0}]; unable to obtain the " + "passphrase needed to decrypt the SSL " + "private key file: {1}".format(cmdline, __e) + ) sys.exit(1) - - if not rebuild and not add_content and not repo.mirror and \ - not (repo.read_only and not repo.writable_root): - # Automatically update search indexes on startup if not already - # told to, and not in readonly/mirror mode. - reindex = True - - if reindex: - try: - # Only execute a index refresh here if --exit-ready was - # requested; it will be handled later in the setup - # process for other cases. - if repo.root and exit_ready: - repo.refresh_index() - except (sr.RepositoryError, search_errors.IndexingException, - api_errors.ApiException) as e: - emsg(str(e), "INDEX") - sys.exit(1) - elif rebuild: - try: - repo.rebuild(build_index=True) - except sr.RepositoryError as e: - emsg(str(e), "REBUILD") - sys.exit(1) - except (search_errors.IndexingException, - api_errors.UnknownErrors, - api_errors.PermissionsException) as e: - emsg(str(e), "INDEX") - sys.exit(1) - elif add_content: - try: - repo.add_content() - repo.refresh_index() - except sr.RepositoryError as e: - emsg(str(e), "ADD_CONTENT") - sys.exit(1) - except (search_errors.IndexingException, - api_errors.UnknownErrors, - api_errors.PermissionsException) as e: - emsg(str(e), "INDEX") - sys.exit(1) - - # Ready to start depot; exit now if requested. - if exit_ready: - sys.exit(0) - - # Next, initialize depot. - if nasty: - depot = ds.NastyDepotHTTP(repo, dconf) + return p.stdout.read().strip(b"\n") + + if ssl_dialog.startswith("exec:"): + exec_path = ssl_dialog.split("exec:")[1] + if not os.path.isabs(exec_path): + exec_path = os.path.join(pkg_root, exec_path) + cmdline = "{0} {1} {2:d}".format(exec_path, "''", port) + elif ssl_dialog == "smf" or ssl_dialog.startswith("svc:"): + if ssl_dialog == "smf": + # Assume the configuration target was an SMF + # FMRI and let svcprop fail with an error if + # it wasn't. 
+ svc_fmri = dconf.target + else: + svc_fmri = ssl_dialog + cmdline = ( + "/usr/bin/svcprop -p " + "pkg_secure/ssl_key_passphrase {0}".format(svc_fmri) + ) + + # The key file requires decryption, but the user has requested + # exec-based authentication, so it will have to be decoded first + # to an un-named temporary file. + try: + with open(ssl_key_file, "rb") as key_file: + pkey = crypto.load_privatekey( + crypto.FILETYPE_PEM, key_file.read(), get_ssl_passphrase + ) + + key_data = tempfile.NamedTemporaryFile(dir=pkg_root, delete=True) + key_data.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)) + key_data.seek(0) + except EnvironmentError as _e: + emsg( + "pkg.depotd: unable to read the SSL private key " + "file: {0}".format(_e) + ) + sys.exit(1) + except crypto.Error as _e: + emsg( + "pkg.depotd: authentication or cryptography " + "failure while attempting to decode\nthe SSL " + "private key file: {0}".format(_e) + ) + sys.exit(1) else: - depot = ds.DepotHTTP(repo, dconf) - - # Now build our site configuration. - conf = { - "/": {}, - "/robots.txt": { - "tools.staticfile.on": True, - "tools.staticfile.filename": os.path.join(depot.web_root, - "robots.txt") - }, + # Redirect the server to the decrypted key file. + ssl_key_file = key_data.name + + # Setup our global configuration. + gconf = { + "checker.on": True, + "environment": "production", + "log.screen": False, + "server.max_request_body_size": MAX_REQUEST_BODY_SIZE, + "server.shutdown_timeout": 0, + "server.socket_host": address, + "server.socket_port": port, + "server.socket_timeout": socket_timeout, + "server.ssl_certificate": ssl_cert_file, + "server.ssl_private_key": ssl_key_file, + "server.thread_pool": threads, + "tools.log_headers.on": True, + "tools.encode.on": True, + "tools.encode.encoding": "utf-8", + "tools.secureheaders.on": True, + } + + if "headers" in dconf.get_property("pkg", "debug"): + # Despite its name, this only logs headers when there is an + # error; it's redundant with the debug feature enabled. + gconf["tools.log_headers.on"] = False + + # Causes the headers of every request to be logged to the error + # log; even if an exception occurs. + gconf["tools.log_headers_always.on"] = True + cherrypy.tools.log_headers_always = cherrypy.Tool( + "on_start_resource", cherrypy.lib.cptools.log_request_headers + ) + + log_cfg = { + "access": dconf.get_property("pkg", "log_access"), + "errors": dconf.get_property("pkg", "log_errors"), + } + + # If stdin is not a tty and the pkgdepot controller isn't being used, + # then assume process will be daemonized and redirect output. + if not os.environ.get("PKGDEPOT_CONTROLLER") and not os.isatty( + sys.stdin.fileno() + ): + # Ensure log handlers are setup to use the file descriptors for + # stdout and stderr as the Daemonizer (used for test suite and + # SMF service) requires this. 
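+ # e.g. "stdout" is rewritten to "/dev/fd/1" and "stderr" to "/dev/fd/2"
+ # (typical descriptor numbers, shown for illustration) so the daemonized
+ # process keeps logging to the descriptors it inherited.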
+ if log_cfg["access"] == "stdout": + log_cfg["access"] = "/dev/fd/{0:d}".format(sys.stdout.fileno()) + elif log_cfg["access"] == "stderr": + log_cfg["access"] = "/dev/fd/{0:d}".format(sys.stderr.fileno()) + elif log_cfg["access"] == "none": + log_cfg["access"] = "/dev/null" + + if log_cfg["errors"] == "stderr": + log_cfg["errors"] = "/dev/fd/{0:d}".format(sys.stderr.fileno()) + elif log_cfg["errors"] == "stdout": + log_cfg["errors"] = "/dev/fd/{0:d}".format(sys.stdout.fileno()) + elif log_cfg["errors"] == "none": + log_cfg["errors"] = "/dev/null" + + log_type_map = { + "errors": {"param": "log.error_file", "attr": "error_log"}, + "access": {"param": "log.access_file", "attr": "access_log"}, + } + + for log_type in log_type_map: + dest = log_cfg[log_type] + if dest in ("stdout", "stderr", "none"): + if dest == "none": + h = logging.StreamHandler(LogSink()) + else: + h = logging.StreamHandler(eval("sys.{0}".format(dest))) + + h.setLevel(logging.DEBUG) + h.setFormatter(cherrypy._cplogging.logfmt) + log_obj = eval( + "cherrypy.log.{0}".format(log_type_map[log_type]["attr"]) + ) + log_obj.addHandler(h) + # Since we've replaced cherrypy's log handler with our + # own, we don't want the output directed to a file. + dest = "" + elif dest: + if not os.path.isabs(dest): + dest = os.path.join(pkg_root, dest) + gconf[log_type_map[log_type]["param"]] = dest + + cherrypy.config.update(gconf) + + # Now that our logging, etc. has been setup, it's safe to perform any + # remaining preparation. + + # Initialize repository state. + if not readonly: + # Not readonly, so assume a new repository should be created. + try: + sr.repository_create(inst_root, properties=repo_props) + except sr.RepositoryExistsError: + # Already exists, nothing to do. + pass + except (api_errors.ApiException, sr.RepositoryError) as _e: + emsg("pkg.depotd: {0}".format(_e)) + sys.exit(1) + + try: + sort_file_max_size = dconf.get_property("pkg", "sort_file_max_size") + + repo = sr.Repository( + cfgpathname=repo_config_file, + log_obj=cherrypy, + mirror=mirror, + properties=repo_props, + read_only=readonly, + root=inst_root, + sort_file_max_size=sort_file_max_size, + writable_root=writable_root, + ) + except (RuntimeError, sr.RepositoryError) as _e: + emsg("pkg.depotd: {0}".format(_e)) + sys.exit(1) + except search_errors.IndexingException as _e: + emsg("pkg.depotd: {0}".format(str(_e)), "INDEX") + sys.exit(1) + except api_errors.ApiException as _e: + emsg("pkg.depotd: {0}".format(str(_e))) + sys.exit(1) + + if ( + not rebuild + and not add_content + and not repo.mirror + and not (repo.read_only and not repo.writable_root) + ): + # Automatically update search indexes on startup if not already + # told to, and not in readonly/mirror mode. + reindex = True + + if reindex: + try: + # Only execute a index refresh here if --exit-ready was + # requested; it will be handled later in the setup + # process for other cases. 
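+            # For a normal start the refresh is instead queued as a
+            # background task once the depot is running.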
+ if repo.root and exit_ready: + repo.refresh_index() + except ( + sr.RepositoryError, + search_errors.IndexingException, + api_errors.ApiException, + ) as e: + emsg(str(e), "INDEX") + sys.exit(1) + elif rebuild: + try: + repo.rebuild(build_index=True) + except sr.RepositoryError as e: + emsg(str(e), "REBUILD") + sys.exit(1) + except ( + search_errors.IndexingException, + api_errors.UnknownErrors, + api_errors.PermissionsException, + ) as e: + emsg(str(e), "INDEX") + sys.exit(1) + elif add_content: + try: + repo.add_content() + repo.refresh_index() + except sr.RepositoryError as e: + emsg(str(e), "ADD_CONTENT") + sys.exit(1) + except ( + search_errors.IndexingException, + api_errors.UnknownErrors, + api_errors.PermissionsException, + ) as e: + emsg(str(e), "INDEX") + sys.exit(1) + + # Ready to start depot; exit now if requested. + if exit_ready: + sys.exit(0) + + # Next, initialize depot. + if nasty: + depot = ds.NastyDepotHTTP(repo, dconf) + else: + depot = ds.DepotHTTP(repo, dconf) + + # Now build our site configuration. + conf = { + "/": {}, + "/robots.txt": { + "tools.staticfile.on": True, + "tools.staticfile.filename": os.path.join( + depot.web_root, "robots.txt" + ), + }, + } + if list(map(int, version)) >= [3, 2, 0]: + conf["/"]["request.dispatch"] = Pkg5Dispatcher() + + proxy_base = dconf.get_property("pkg", "proxy_base") + if proxy_base: + # This changes the base URL for our server, and is primarily + # intended to allow our depot process to operate behind Apache + # or some other webserver process. + # + # Visit the following URL for more information: + # http://cherrypy.org/wiki/BuiltinTools#tools.proxy + proxy_conf = { + "tools.proxy.on": True, + "tools.proxy.local": "", + "tools.proxy.base": proxy_base, } - if list(map(int, version)) >= [3, 2, 0]: - conf["/"]["request.dispatch"] = Pkg5Dispatcher() - - proxy_base = dconf.get_property("pkg", "proxy_base") - if proxy_base: - # This changes the base URL for our server, and is primarily - # intended to allow our depot process to operate behind Apache - # or some other webserver process. - # - # Visit the following URL for more information: - # http://cherrypy.org/wiki/BuiltinTools#tools.proxy - proxy_conf = { - "tools.proxy.on": True, - "tools.proxy.local": "", - "tools.proxy.base": proxy_base - } - - # Now merge or add our proxy configuration information into the - # existing configuration. - for entry in proxy_conf: - conf["/"][entry] = proxy_conf[entry] - - if ll_mirror: - ds.DNSSD_Plugin(cherrypy.engine, gconf).subscribe() - - if reindex: - # Tell depot to update search indexes when possible; - # this is done as a background task so that packages - # can be served immediately while search indexes are - # still being updated. - depot._queue_refresh_index() - - # If stdin is not a tty and the pkgdepot controller isn't being used, - # then assume process should be daemonized. - if not os.environ.get("PKGDEPOT_CONTROLLER") and \ - not os.isatty(sys.stdin.fileno()): - # Translate the values in log_cfg into paths. - Daemonizer(cherrypy.engine, stderr=log_cfg["errors"], - stdout=log_cfg["access"]).subscribe() - try: - root = cherrypy.Application(depot) - cherrypy.quickstart(root, config=conf) - except Exception as _e: - emsg("pkg.depotd: unknown error starting depot server, " \ - "illegal option value specified?") - emsg(_e) - sys.exit(1) + # Now merge or add our proxy configuration information into the + # existing configuration. 
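+        # Entries already present under "/" are simply overwritten.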
+ for entry in proxy_conf: + conf["/"][entry] = proxy_conf[entry] + + if ll_mirror: + ds.DNSSD_Plugin(cherrypy.engine, gconf).subscribe() + + if reindex: + # Tell depot to update search indexes when possible; + # this is done as a background task so that packages + # can be served immediately while search indexes are + # still being updated. + depot._queue_refresh_index() + + # If stdin is not a tty and the pkgdepot controller isn't being used, + # then assume process should be daemonized. + if not os.environ.get("PKGDEPOT_CONTROLLER") and not os.isatty( + sys.stdin.fileno() + ): + # Translate the values in log_cfg into paths. + Daemonizer( + cherrypy.engine, stderr=log_cfg["errors"], stdout=log_cfg["access"] + ).subscribe() + + try: + root = cherrypy.Application(depot) + cherrypy.quickstart(root, config=conf) + except Exception as _e: + emsg( + "pkg.depotd: unknown error starting depot server, " + "illegal option value specified?" + ) + emsg(_e) + sys.exit(1) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/__init__.py b/src/modules/__init__.py index 944fe601e..d50ade9f0 100644 --- a/src/modules/__init__.py +++ b/src/modules/__init__.py @@ -26,4 +26,4 @@ VERSION = "unknown" # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/__init__.py b/src/modules/actions/__init__.py index 1b14e6885..4bd05b53d 100644 --- a/src/modules/actions/__init__.py +++ b/src/modules/actions/__init__.py @@ -49,9 +49,9 @@ # All modules in this package (all python files except __init__.py with their # extensions stripped off). __all__ = [ - f[:-3] - for f in os.listdir(__path__[0]) - if f.endswith(".py") and f != "__init__.py" + f[:-3] + for f in os.listdir(__path__[0]) + if f.endswith(".py") and f != "__init__.py" ] # A dictionary of all the types in this package, mapping to the classes that @@ -64,364 +64,406 @@ payload_types = {} for modname in __all__: - module = __import__("{0}.{1}".format(__name__, modname), - globals(), locals(), [modname]) - - nvlist = inspect.getmembers(module, inspect.isclass) - - # Pull the class objects out of nvlist, keeping only those that are - # actually defined in this package. - classes = [ - c[1] - for c in nvlist - if '.'.join(c[1].__module__.split('.')[:-1]) == __name__ - ] - for cls in classes: - if hasattr(cls, "name"): - types[cls.name] = cls - if hasattr(cls, "has_payload") and cls.has_payload: - payload_types[cls.name] = cls + module = __import__( + "{0}.{1}".format(__name__, modname), globals(), locals(), [modname] + ) + + nvlist = inspect.getmembers(module, inspect.isclass) + + # Pull the class objects out of nvlist, keeping only those that are + # actually defined in this package. 
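+    # inspect.getmembers() yields (name, class) tuples, so c[1] below is
+    # the class object itself.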
+ classes = [ + c[1] + for c in nvlist + if ".".join(c[1].__module__.split(".")[:-1]) == __name__ + ] + for cls in classes: + if hasattr(cls, "name"): + types[cls.name] = cls + if hasattr(cls, "has_payload") and cls.has_payload: + payload_types[cls.name] = cls # Clean up after ourselves del modname, module, nvlist, classes, cls class ActionError(Exception): - """Base exception class for Action errors.""" + """Base exception class for Action errors.""" + + def __str__(self): + raise NotImplementedError() - def __str__(self): - raise NotImplementedError() class ActionRetry(ActionError): - def __init__(self, *args): - ActionError.__init__(self) - self.actionstr = str(args[0]) + def __init__(self, *args): + ActionError.__init__(self) + self.actionstr = str(args[0]) + + def __str__(self): + return _("Need to try installing {action} again").format( + action=self.actionstr + ) - def __str__(self): - return _("Need to try installing {action} again").format( - action=self.actionstr) class UnknownActionError(ActionError): - def __init__(self, *args): - ActionError.__init__(self) - self.actionstr = args[0] - self.type = args[1] - - def __str__(self): - if hasattr(self, "fmri") and self.fmri is not None: - return _("unknown action type '{type}' in package " - "'{fmri}' in action '{action}'").format( - type=self.type, fmri=self.fmri, - action=self.actionstr) - return _("unknown action type '{type}' in action " - "'{action}'").format(type=self.type, - action=self.actionstr) + def __init__(self, *args): + ActionError.__init__(self) + self.actionstr = args[0] + self.type = args[1] + + def __str__(self): + if hasattr(self, "fmri") and self.fmri is not None: + return _( + "unknown action type '{type}' in package " + "'{fmri}' in action '{action}'" + ).format(type=self.type, fmri=self.fmri, action=self.actionstr) + return _("unknown action type '{type}' in action " "'{action}'").format( + type=self.type, action=self.actionstr + ) + class MalformedActionError(ActionError): - def __init__(self, *args): - ActionError.__init__(self) - self.actionstr = args[0] - self.position = args[1] - self.errorstr = args[2] - - def __str__(self): - marker = " " * (4 + self.position) + "^" - if hasattr(self, "fmri") and self.fmri is not None: - return _("Malformed action in package '{fmri}' at " - "position: {pos:d}: {error}:\n {action}\n" - "{marker}").format(fmri=self.fmri, - pos=self.position, action=self.actionstr, - marker=marker, error=self.errorstr) - return _("Malformed action at position: {pos:d}: {error}:\n " - "{action}\n{marker}").format(pos=self.position, - action=self.actionstr, marker=marker, - error=self.errorstr) + def __init__(self, *args): + ActionError.__init__(self) + self.actionstr = args[0] + self.position = args[1] + self.errorstr = args[2] + + def __str__(self): + marker = " " * (4 + self.position) + "^" + if hasattr(self, "fmri") and self.fmri is not None: + return _( + "Malformed action in package '{fmri}' at " + "position: {pos:d}: {error}:\n {action}\n" + "{marker}" + ).format( + fmri=self.fmri, + pos=self.position, + action=self.actionstr, + marker=marker, + error=self.errorstr, + ) + return _( + "Malformed action at position: {pos:d}: {error}:\n " + "{action}\n{marker}" + ).format( + pos=self.position, + action=self.actionstr, + marker=marker, + error=self.errorstr, + ) class ActionDataError(ActionError): - """Used to indicate that a file-related error occuring during action - initialization.""" + """Used to indicate that a file-related error occuring during action + initialization.""" - def 
__init__(self, *args, **kwargs): - ActionError.__init__(self) - self.error = args[0] - self.path = kwargs.get("path", None) + def __init__(self, *args, **kwargs): + ActionError.__init__(self) + self.error = args[0] + self.path = kwargs.get("path", None) - def __str__(self): - return str(self.error) + def __str__(self): + return str(self.error) class InvalidActionError(ActionError): - """Used to indicate that attributes provided were invalid, or required - attributes were missing for an action.""" + """Used to indicate that attributes provided were invalid, or required + attributes were missing for an action.""" - def __init__(self, *args): - ActionError.__init__(self) - self.actionstr = args[0] - self.errorstr = args[1] + def __init__(self, *args): + ActionError.__init__(self) + self.actionstr = args[0] + self.errorstr = args[1] - def __str__(self): - if hasattr(self, "fmri") and self.fmri is not None: - return _("invalid action in package {fmri}: " - "{action}: {error}").format(fmri=self.fmri, - action=self.actionstr, error=self.errorstr) - return _("invalid action, '{action}': {error}").format( - action=self.actionstr, error=self.errorstr) + def __str__(self): + if hasattr(self, "fmri") and self.fmri is not None: + return _( + "invalid action in package {fmri}: " "{action}: {error}" + ).format(fmri=self.fmri, action=self.actionstr, error=self.errorstr) + return _("invalid action, '{action}': {error}").format( + action=self.actionstr, error=self.errorstr + ) class MissingKeyAttributeError(InvalidActionError): - """Used to indicate that an action's key attribute is missing.""" + """Used to indicate that an action's key attribute is missing.""" - def __init__(self, *args): - InvalidActionError.__init__(self, str(args[0]), - _("no value specified for key attribute '{0}'").format( - args[1])) + def __init__(self, *args): + InvalidActionError.__init__( + self, + str(args[0]), + _("no value specified for key attribute '{0}'").format(args[1]), + ) class KeyAttributeMultiValueError(InvalidActionError): - """Used to indicate that an action's key attribute was specified - multiple times for an action that expects it only once.""" + """Used to indicate that an action's key attribute was specified + multiple times for an action that expects it only once.""" - def __init__(self, *args): - InvalidActionError.__init__(self, str(args[0]), - _("{0} attribute may only be specified once").format( - args[1])) + def __init__(self, *args): + InvalidActionError.__init__( + self, + str(args[0]), + _("{0} attribute may only be specified once").format(args[1]), + ) class InvalidPathAttributeError(InvalidActionError): - """Used to indicate that an action's path attribute value was either - empty, '/', or not a string.""" + """Used to indicate that an action's path attribute value was either + empty, '/', or not a string.""" - def __init__(self, *args): - InvalidActionError.__init__(self, str(args[0]), - _("Empty or invalid path attribute")) + def __init__(self, *args): + InvalidActionError.__init__( + self, str(args[0]), _("Empty or invalid path attribute") + ) class InvalidActionAttributesError(ActionError): - """Used to indicate that one or more action attributes were invalid.""" + """Used to indicate that one or more action attributes were invalid.""" - def __init__(self, act, errors, fmri=None): - """'act' is an Action (object or string). + def __init__(self, act, errors, fmri=None): + """'act' is an Action (object or string). 
- 'errors' is a list of tuples of the form (name, error) where - 'name' is the action attribute name, and 'error' is a string - indicating what attribute is invalid and why. + 'errors' is a list of tuples of the form (name, error) where + 'name' is the action attribute name, and 'error' is a string + indicating what attribute is invalid and why. - 'fmri' is an optional package FMRI (object or string) - indicating what package contained the actions with invalid - attributes.""" + 'fmri' is an optional package FMRI (object or string) + indicating what package contained the actions with invalid + attributes.""" - ActionError.__init__(self) - self.action = act - self.errors = errors - self.fmri = fmri + ActionError.__init__(self) + self.action = act + self.errors = errors + self.fmri = fmri - def __str__(self): - act_errors = "\n ".join(err for name, err in self.errors) - if self.fmri: - return _("The action '{action}' in package " - "'{fmri}' has invalid attribute(s):\n" - " {act_errors}").format(action=self.action, - fmri=self.fmri, act_errors=act_errors) - return _("The action '{action}' has invalid attribute(s):\n" - " {act_errors}").format(action=self.action, - act_errors=act_errors) + def __str__(self): + act_errors = "\n ".join(err for name, err in self.errors) + if self.fmri: + return _( + "The action '{action}' in package " + "'{fmri}' has invalid attribute(s):\n" + " {act_errors}" + ).format(action=self.action, fmri=self.fmri, act_errors=act_errors) + return _( + "The action '{action}' has invalid attribute(s):\n" " {act_errors}" + ).format(action=self.action, act_errors=act_errors) # This must be imported *after* all of the exception classes are defined as # _actions module init needs the exception objects. from ._actions import fromstr + def attrsfromstr(string): - """Create an attribute dict given a string w/ key=value pairs. + """Create an attribute dict given a string w/ key=value pairs. + + Raises MalformedActionError if the attributes have syntactic problems. + """ + return fromstr("unknown {0}".format(string)).attrs - Raises MalformedActionError if the attributes have syntactic problems. - """ - return fromstr("unknown {0}".format(string)).attrs def internalizelist(atype, args, ahash=None, basedirs=None): - """Create an action instance based on a sequence of "key=value" strings. - This function also translates external representations of actions with - payloads (like file and license which can use NOHASH or file paths to - point to the payload) to an internal representation which sets the - data field of the action returned. + """Create an action instance based on a sequence of "key=value" strings. + This function also translates external representations of actions with + payloads (like file and license which can use NOHASH or file paths to + point to the payload) to an internal representation which sets the + data field of the action returned. - The "atype" parameter is the type of action to be built. + The "atype" parameter is the type of action to be built. - The "args" parameter is the sequence of "key=value" strings. + The "args" parameter is the sequence of "key=value" strings. - The "ahash" parameter is used to set the hash value for the action. + The "ahash" parameter is used to set the hash value for the action. - The "basedirs" parameter is the list of directories to look in to find - any payload for the action. + The "basedirs" parameter is the list of directories to look in to find + any payload for the action. 
- Raises MalformedActionError if the attribute strings are malformed. - """ + Raises MalformedActionError if the attribute strings are malformed. + """ - if atype not in types: - raise UnknownActionError(("{0} {1}".format(atype, - " ".join(args))).strip(), atype) + if atype not in types: + raise UnknownActionError( + ("{0} {1}".format(atype, " ".join(args))).strip(), atype + ) - data = None + data = None - if atype in ("file", "license"): - data = args.pop(0) + if atype in ("file", "license"): + data = args.pop(0) - attrs = {} + attrs = {} - try: - # list comprehension in Python 3 doesn't leak loop control - # variable to surrounding variable, so use a regular loop - for kv in args: - a, v = kv.split("=", 1) - if v == '' or a == '': - kvi = args.index(kv) + 1 - p1 = " ".join(args[:kvi]) - p2 = " ".join(args[kvi:]) - raise MalformedActionError( - "{0} {1} {2}".format(atype, p1, p2), - len(p1) + 1, - "attribute '{0}'".format(kv)) - - # This is by far the common case-- an attribute with - # a single value. - if a not in attrs: - attrs[a] = v - else: - av = attrs[a] - if isinstance(av, list): - attrs[a].append(v) - else: - attrs[a] = [ av, v ] - except ValueError: - # We're only here if the for: statement above throws a - # MalformedActionError. That can happen if split yields a - # single element, which is possible if e.g. an attribute lacks - # an =. + try: + # list comprehension in Python 3 doesn't leak loop control + # variable to surrounding variable, so use a regular loop + for kv in args: + a, v = kv.split("=", 1) + if v == "" or a == "": kvi = args.index(kv) + 1 p1 = " ".join(args[:kvi]) p2 = " ".join(args[kvi:]) - raise MalformedActionError("{0} {1} {2}".format(atype, p1, p2), - len(p1) + 2, "attribute '{0}'".format(kv)) - - # keys called 'data' cause problems due to the named parameter being - # passed to the action constructor below. Check for these. Note that - # _fromstr also checks for this. - if "data" in attrs: - astr = atype + " " + " ".join(args) - raise InvalidActionError(astr, - "{0} action cannot have a 'data' attribute".format( - atype)) - - action = types[atype](data, **attrs) - if ahash: - action.hash = ahash - - local_path, used_basedir = set_action_data(data, action, - basedirs=basedirs) - return action, local_path + raise MalformedActionError( + "{0} {1} {2}".format(atype, p1, p2), + len(p1) + 1, + "attribute '{0}'".format(kv), + ) + + # This is by far the common case-- an attribute with + # a single value. + if a not in attrs: + attrs[a] = v + else: + av = attrs[a] + if isinstance(av, list): + attrs[a].append(v) + else: + attrs[a] = [av, v] + except ValueError: + # We're only here if the for: statement above throws a + # MalformedActionError. That can happen if split yields a + # single element, which is possible if e.g. an attribute lacks + # an =. + kvi = args.index(kv) + 1 + p1 = " ".join(args[:kvi]) + p2 = " ".join(args[kvi:]) + raise MalformedActionError( + "{0} {1} {2}".format(atype, p1, p2), + len(p1) + 2, + "attribute '{0}'".format(kv), + ) + + # keys called 'data' cause problems due to the named parameter being + # passed to the action constructor below. Check for these. Note that + # _fromstr also checks for this. 
+ if "data" in attrs: + astr = atype + " " + " ".join(args) + raise InvalidActionError( + astr, "{0} action cannot have a 'data' attribute".format(atype) + ) + + action = types[atype](data, **attrs) + if ahash: + action.hash = ahash + + local_path, used_basedir = set_action_data(data, action, basedirs=basedirs) + return action, local_path + def internalizestr(string, basedirs=None, load_data=True): - """Create an action instance based on a sequence of strings. - This function also translates external representations of actions with - payloads (like file and license which can use NOHASH or file paths to - point to the payload) to an internal representation which sets the - data field of the action returned. + """Create an action instance based on a sequence of strings. + This function also translates external representations of actions with + payloads (like file and license which can use NOHASH or file paths to + point to the payload) to an internal representation which sets the + data field of the action returned. - In general, each string should be in the form of "key=value". The - exception is a payload for certain actions which should be the first - item in the sequence. + In general, each string should be in the form of "key=value". The + exception is a payload for certain actions which should be the first + item in the sequence. - Raises MalformedActionError if the attribute strings are malformed. - """ + Raises MalformedActionError if the attribute strings are malformed. + """ - action = fromstr(string) + action = fromstr(string) - if action.name not in ("file", "license") or not load_data: - return action, None, None + if action.name not in ("file", "license") or not load_data: + return action, None, None + + local_path, used_basedir = set_action_data( + action.hash, action, basedirs=basedirs + ) + return action, local_path, used_basedir - local_path, used_basedir = set_action_data(action.hash, action, - basedirs=basedirs) - return action, local_path, used_basedir def set_action_data(payload, action, basedirs=None, bundles=None): - """Sets the data field of an action using the information in the - payload and returns the actual path used to set the data and the - source used to find the data (this may be a path or a bundle - object). - - The "payload" parameter is the representation of the data to assign to - the action's data field. It can either be NOHASH or a path to the file. - - The "action" parameter is the action to modify. - - The "basedirs" parameter contains the directories to examine to find the - payload in. - - The "bundles" parameter contains a list of bundle objects to find the - payload in. - - "basedirs" and/or "bundles" must be specified. - """ - - if not payload: - return None, None - - if payload == "NOHASH": - try: - filepath = os.path.sep + action.attrs["path"] - except KeyError: - raise InvalidPathAttributeError(action) - else: - filepath = payload - - if not basedirs: - basedirs = [] - if not bundles: - bundles = [] - - # Attempt to find directory or bundle containing source of file data. 
- data = None - used_src = None - path = filepath.lstrip(os.path.sep) - for bd in basedirs: - # look for file in specified dir - npath = os.path.join(bd, path) - if os.path.isfile(npath): - used_src = bd - data = npath - break - else: - for bundle in bundles: - act = bundle.get_action(path) - if act: - data = act.data - used_src = bundle - action.attrs["pkg.size"] = \ - act.attrs["pkg.size"] - break - - if not data and basedirs: - raise ActionDataError(_("Action payload '{name}' " - "was not found in any of the provided locations:" - "\n{basedirs}").format(name=filepath, - basedirs="\n".join(basedirs)), path=filepath) - elif not data and bundles: - raise ActionDataError(_("Action payload '{name}' was " - "not found in any of the provided sources:" - "\n{sources}").format(name=filepath, - sources="\n".join(b.filename for b in bundles)), - path=filepath) - elif not data: - # Only if no explicit sources were provided should a - # fallback to filepath be performed. - data = filepath - - # This relies on data having a value set by the code above so that an - # ActionDataError will be raised if the file doesn't exist or is not - # accessible. - action.set_data(data) - return data, used_src + """Sets the data field of an action using the information in the + payload and returns the actual path used to set the data and the + source used to find the data (this may be a path or a bundle + object). + + The "payload" parameter is the representation of the data to assign to + the action's data field. It can either be NOHASH or a path to the file. + + The "action" parameter is the action to modify. + + The "basedirs" parameter contains the directories to examine to find the + payload in. + + The "bundles" parameter contains a list of bundle objects to find the + payload in. + + "basedirs" and/or "bundles" must be specified. + """ + + if not payload: + return None, None + + if payload == "NOHASH": + try: + filepath = os.path.sep + action.attrs["path"] + except KeyError: + raise InvalidPathAttributeError(action) + else: + filepath = payload + + if not basedirs: + basedirs = [] + if not bundles: + bundles = [] + + # Attempt to find directory or bundle containing source of file data. + data = None + used_src = None + path = filepath.lstrip(os.path.sep) + for bd in basedirs: + # look for file in specified dir + npath = os.path.join(bd, path) + if os.path.isfile(npath): + used_src = bd + data = npath + break + else: + for bundle in bundles: + act = bundle.get_action(path) + if act: + data = act.data + used_src = bundle + action.attrs["pkg.size"] = act.attrs["pkg.size"] + break + + if not data and basedirs: + raise ActionDataError( + _( + "Action payload '{name}' " + "was not found in any of the provided locations:" + "\n{basedirs}" + ).format(name=filepath, basedirs="\n".join(basedirs)), + path=filepath, + ) + elif not data and bundles: + raise ActionDataError( + _( + "Action payload '{name}' was " + "not found in any of the provided sources:" + "\n{sources}" + ).format( + name=filepath, + sources="\n".join(b.filename for b in bundles), + ), + path=filepath, + ) + elif not data: + # Only if no explicit sources were provided should a + # fallback to filepath be performed. + data = filepath + + # This relies on data having a value set by the code above so that an + # ActionDataError will be raised if the file doesn't exist or is not + # accessible. 
+ action.set_data(data) + return data, used_src + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/attribute.py b/src/modules/actions/attribute.py index b50ddebe0..d1574d1c0 100644 --- a/src/modules/actions/attribute.py +++ b/src/modules/actions/attribute.py @@ -35,152 +35,159 @@ import pkg.actions import six -class AttributeAction(generic.Action): - """Class representing a package attribute.""" - - __slots__ = ["value"] - name = "set" - key_attr = "name" - ordinality = generic._orderdict[name] +class AttributeAction(generic.Action): + """Class representing a package attribute.""" + + __slots__ = ["value"] + + name = "set" + key_attr = "name" + ordinality = generic._orderdict[name] + + def __init__(self, data=None, **attrs): + generic.Action.__init__(self, data, **attrs) + + try: + self.attrs["name"] + self.attrs["value"] + except KeyError: + # For convenience, we allow people to express attributes as + # "=", rather than "name= value=", but + # we always convert to the latter. + try: + if len(attrs) == 1: + ( + self.attrs["name"], + self.attrs["value"], + ) = self.attrs.popitem() + return + except KeyError: + pass + raise pkg.actions.InvalidActionError( + str(self), 'Missing "name" or "value" attribute' + ) + + def generate_indices(self): + """Generates the indices needed by the search dictionary. See + generic.py for a more detailed explanation.""" + + if self.has_category_info(): + try: + return [ + ( + self.name, + self.attrs["name"], + [all_levels] + + [t.split() for t in all_levels.split("/")], + all_levels, + ) + for scheme, all_levels in self.parse_category_info() + ] + except ValueError: + pass + + if isinstance(self.attrs["value"], list): + tmp = [] + for v in self.attrs["value"]: + assert isinstance(v, six.string_types) + if " " in v: + words = v.split() + for w in words: + tmp.append((self.name, self.attrs["name"], w, v)) + else: + tmp.append((self.name, self.attrs["name"], v, None)) + return tmp + elif self.attrs["name"] in ("fmri", "pkg.fmri"): + fmri_obj = pkg.fmri.PkgFmri(self.attrs["value"]) + + lst = [ + fmri_obj.get_pkg_stem(include_scheme=False), + str(fmri_obj.version.build_release), + str(fmri_obj.version.release), + str(fmri_obj.version.timestr), + ] + lst.extend(fmri_obj.hierarchical_names()) + return [ + ( + self.name, + self.attrs["name"], + w, + fmri_obj.get_pkg_stem(include_scheme=False), + ) + for w in lst + ] + + elif " " in self.attrs["value"]: + v = self.attrs["value"] + return [(self.name, self.attrs["name"], w, v) for w in v.split()] + else: + return [(self.name, self.attrs["name"], self.attrs["value"], None)] + + def has_category_info(self): + return self.attrs["name"] == "info.classification" + + def parse_category_info(self): + rval = [] + # Some logic is inlined here for performance reasons. + if self.attrs["name"] != "info.classification": + return rval + + for val in self.attrlist("value"): + if ":" in val: + scheme, cats = val.split(":", 1) + else: + scheme = "" + cats = val + rval.append((scheme, cats)) + return rval + + def validate(self, fmri=None): + """Performs additional validation of action attributes that + for performance or other reasons cannot or should not be done + during Action object creation. An ActionError exception (or + subclass of) will be raised if any attributes are not valid. + This is primarily intended for use during publication or during + error handling to provide additional diagonostics. 
+ + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action. + """ + + name = self.attrs["name"] + if name in ( + "pkg.summary", + "pkg.obsolete", + "pkg.renamed", + "pkg.legacy", + "pkg.description", + "pkg.depend.explicit-install", + ): + # If set action is for any of the above, only a single + # value is permitted. + generic.Action._validate(self, fmri=fmri, single_attrs=("value",)) + elif name.startswith("pkg.additional-"): + # For pkg actuators, just test that the values are valid + # FMRIs. We want to prevent the system from failing when + # newer, currently unknown actuators are encountered. + errors = [] + fmris = self.attrlist("value") + for f in fmris: + try: + pkg.fmri.PkgFmri(f) + except pkg.fmri.IllegalFmri as e: + errors.append((name, str(e))) + if errors: + raise pkg.actions.InvalidActionAttributesError( + self, errors, fmri=fmri + ) - def __init__(self, data=None, **attrs): - generic.Action.__init__(self, data, **attrs) + generic.Action._validate(self, fmri=fmri) + else: + # In all other cases, multiple values are assumed to be + # permissible. + generic.Action._validate(self, fmri=fmri) - try: - self.attrs["name"] - self.attrs["value"] - except KeyError: - # For convenience, we allow people to express attributes as - # "=", rather than "name= value=", but - # we always convert to the latter. - try: - if len(attrs) == 1: - self.attrs["name"], self.attrs["value"] = \ - self.attrs.popitem() - return - except KeyError: - pass - raise pkg.actions.InvalidActionError(str(self), - 'Missing "name" or "value" attribute') - - def generate_indices(self): - """Generates the indices needed by the search dictionary. See - generic.py for a more detailed explanation.""" - - if self.has_category_info(): - try: - - return [ - (self.name, self.attrs["name"], - [all_levels] + - [t.split() for t in all_levels.split("/")], - all_levels) - for scheme, all_levels - in self.parse_category_info() - ] - except ValueError: - pass - - if isinstance(self.attrs["value"], list): - tmp = [] - for v in self.attrs["value"]: - assert isinstance(v, six.string_types) - if " " in v: - words = v.split() - for w in words: - tmp.append((self.name, - self.attrs["name"], w, - v)) - else: - tmp.append((self.name, - self.attrs["name"], v, None)) - return tmp - elif self.attrs["name"] in ("fmri", "pkg.fmri"): - fmri_obj = pkg.fmri.PkgFmri(self.attrs["value"]) - - lst = [ - fmri_obj.get_pkg_stem(include_scheme=False), - str(fmri_obj.version.build_release), - str(fmri_obj.version.release), - str(fmri_obj.version.timestr) - ] - lst.extend(fmri_obj.hierarchical_names()) - return [ - (self.name, self.attrs["name"], w, - fmri_obj.get_pkg_stem(include_scheme=False)) - for w in lst - ] - - elif " " in self.attrs["value"]: - v = self.attrs["value"] - return [ - (self.name, self.attrs["name"], w, v) - for w in v.split() - ] - else: - return [ - (self.name, self.attrs["name"], - self.attrs["value"], None) - ] - - def has_category_info(self): - return self.attrs["name"] == "info.classification" - - def parse_category_info(self): - rval = [] - # Some logic is inlined here for performance reasons. 
- if self.attrs["name"] != "info.classification": - return rval - - for val in self.attrlist("value"): - if ":" in val: - scheme, cats = val.split(":", 1) - else: - scheme = "" - cats = val - rval.append((scheme, cats)) - return rval - - def validate(self, fmri=None): - """Performs additional validation of action attributes that - for performance or other reasons cannot or should not be done - during Action object creation. An ActionError exception (or - subclass of) will be raised if any attributes are not valid. - This is primarily intended for use during publication or during - error handling to provide additional diagonostics. - - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action. - """ - - name = self.attrs["name"] - if name in ("pkg.summary", "pkg.obsolete", "pkg.renamed", - "pkg.legacy", "pkg.description", "pkg.depend.explicit-install"): - # If set action is for any of the above, only a single - # value is permitted. - generic.Action._validate(self, fmri=fmri, - single_attrs=("value",)) - elif name.startswith("pkg.additional-"): - # For pkg actuators, just test that the values are valid - # FMRIs. We want to prevent the system from failing when - # newer, currently unknown actuators are encountered. - errors = [] - fmris = self.attrlist("value") - for f in fmris: - try: - pkg.fmri.PkgFmri(f) - except pkg.fmri.IllegalFmri as e: - errors.append((name, str(e))) - if errors: - raise pkg.actions.InvalidActionAttributesError( - self, errors, fmri=fmri) - - generic.Action._validate(self, fmri=fmri) - else: - # In all other cases, multiple values are assumed to be - # permissible. - generic.Action._validate(self, fmri=fmri) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/depend.py b/src/modules/actions/depend.py index 1a3b43034..eac4d1859 100644 --- a/src/modules/actions/depend.py +++ b/src/modules/actions/depend.py @@ -52,7 +52,8 @@ "origin", "parent", "require", - "require-any") + "require-any", +) # # this is a special package name that when present in an fmri defines a @@ -61,6 +62,7 @@ # DEPEND_SELF = "feature/package/dependency/self" + class DependencyAction(generic.Action): """Class representing a dependency packaging object. The fmri attribute is expected to be the pkg FMRI that this package depends on. The type @@ -112,31 +114,33 @@ def __init__(self, data=None, **attrs): generic.Action.__init__(self, data, **attrs) def __check_parent_installed(self, image, pkg_fmri, fmri): - if not image.linked.ischild(): # if we're not a linked child then ignore "parent" # dependencies. return [] # create a dictionary of packages installed in the parent - ppkgs_dict = dict([ - (i.pkg_name, i) - for i in image.linked.parent_fmris() - ]) + ppkgs_dict = dict( + [(i.pkg_name, i) for i in image.linked.parent_fmris()] + ) errors = [] if fmri.pkg_name not in ppkgs_dict: errors.append( - _("Package is not installed in " - "parent image {0}").format(fmri.pkg_name)) + _("Package is not installed in " "parent image {0}").format( + fmri.pkg_name + ) + ) return errors pf = ppkgs_dict[fmri.pkg_name] if fmri.publisher and fmri.publisher != pf.publisher: # package is from a different publisher - errors.append(_("Package in parent is from a " - "different publisher: {0}"). 
- format(pf)) + errors.append( + _( + "Package in parent is from a " "different publisher: {0}" + ).format(pf) + ) return errors # This intentionally mirrors the logic in @@ -145,55 +149,73 @@ def __check_parent_installed(self, image, pkg_fmri, fmri): # parent dependency is satisfied, which applies to both # DEPEND_SELF and other cases return [] - elif (pkg_fmri != fmri and - pf.version.is_successor(fmri.version, - pkg.version.CONSTRAINT_NONE)): + elif pkg_fmri != fmri and pf.version.is_successor( + fmri.version, pkg.version.CONSTRAINT_NONE + ): # *not* DEPEND_SELF; parent dependency is satisfied return [] - if pf.version.is_successor(fmri.version, - pkg.version.CONSTRAINT_NONE): - errors.append(_("Parent image has a newer " - "version of package {0}").format(pf)) + if pf.version.is_successor(fmri.version, pkg.version.CONSTRAINT_NONE): + errors.append( + _("Parent image has a newer " "version of package {0}").format( + pf + ) + ) else: - errors.append(_("Parent image has an older " - "version of package {0}").format(pf)) + errors.append( + _("Parent image has an older " "version of package {0}").format( + pf + ) + ) return errors - def __check_installed(self, image, installed_version, min_fmri, - max_fmri, required, ctype): + def __check_installed( + self, image, installed_version, min_fmri, max_fmri, required, ctype + ): errors = [] if not installed_version: return errors vi = installed_version.version - if min_fmri and min_fmri.version and \ - min_fmri.version.is_successor( - vi, pkg.version.CONSTRAINT_NONE): + if ( + min_fmri + and min_fmri.version + and min_fmri.version.is_successor(vi, pkg.version.CONSTRAINT_NONE) + ): errors.append( - _("{dep_type} dependency {dep_val} " - "is downrev ({inst_ver})").format( - dep_type=ctype, dep_val=min_fmri, - inst_ver=installed_version)) + _( + "{dep_type} dependency {dep_val} " "is downrev ({inst_ver})" + ).format( + dep_type=ctype, dep_val=min_fmri, inst_ver=installed_version + ) + ) return errors - if max_fmri and max_fmri.version and \ - vi > max_fmri.version and \ - not vi.is_successor(max_fmri.version, - pkg.version.CONSTRAINT_AUTO): + if ( + max_fmri + and max_fmri.version + and vi > max_fmri.version + and not vi.is_successor( + max_fmri.version, pkg.version.CONSTRAINT_AUTO + ) + ): errors.append( - _("{dep_type} dependency {dep_val} " - "is uprev ({inst_ver})").format( - dep_type=ctype, dep_val=max_fmri, - inst_ver=installed_version)) + _( + "{dep_type} dependency {dep_val} " "is uprev ({inst_ver})" + ).format( + dep_type=ctype, dep_val=max_fmri, inst_ver=installed_version + ) + ) return errors - if required and pkgdefs.PKG_STATE_OBSOLETE in \ - image.get_pkg_state(installed_version): + if required and pkgdefs.PKG_STATE_OBSOLETE in image.get_pkg_state( + installed_version + ): errors.append( - _("{dep_type} dependency on an obsolete " - "package ({obs_pkg}); this package must " - "be uninstalled manually").format( - dep_type=ctype, - obs_pkg=installed_version)) + _( + "{dep_type} dependency on an obsolete " + "package ({obs_pkg}); this package must " + "be uninstalled manually" + ).format(dep_type=ctype, obs_pkg=installed_version) + ) return errors return errors @@ -218,8 +240,8 @@ def __min_version(): if ctype not in known_types: errors.append( - _("Unknown type ({0}) in depend action"). 
- format(ctype)) + _("Unknown type ({0}) in depend action").format(ctype) + ) return errors, warnings, info # get a list of fmris and do fmri token substitution @@ -233,14 +255,12 @@ def __min_version(): if ctype == "parent": # handle "parent" dependencies here assert len(pfmris) == 1 - errors.extend(self.__check_parent_installed( - image, pfmri, pfmris[0])) + errors.extend( + self.__check_parent_installed(image, pfmri, pfmris[0]) + ) return errors, warnings, info - installed_versions = [ - image.get_version_installed(f) - for f in pfmris - ] + installed_versions = [image.get_version_installed(f) for f in pfmris] installed_version = installed_versions[0] pfmri = pfmris[0] @@ -249,9 +269,11 @@ def __min_version(): max_fmri = None required = False - avoids = (image.avoid_set_get() | - image.avoid_set_get(implicit=True) | - image.obsolete_set_get()) + avoids = ( + image.avoid_set_get() + | image.avoid_set_get(implicit=True) + | image.obsolete_set_get() + ) if ctype == "require": required = True @@ -268,8 +290,10 @@ def __min_version(): elif ctype == "conditional": cfmri = pkg.fmri.PkgFmri(self.attrs["predicate"]) installed_cversion = image.get_version_installed(cfmri) - if installed_cversion is not None and \ - installed_cversion.is_successor(cfmri): + if ( + installed_cversion is not None + and installed_cversion.is_successor(cfmri) + ): min_fmri = pfmri required = True elif ctype == "group": @@ -277,11 +301,11 @@ def __min_version(): required = True elif ctype == "group-any": installed_stems = set( - f.pkg_name for f in installed_versions - if f is not None) + f.pkg_name for f in installed_versions if f is not None + ) group_stems = set( - f.pkg_name for f in pfmris - if f.pkg_name not in avoids) + f.pkg_name for f in pfmris if f.pkg_name not in avoids + ) matching_stems = installed_stems & group_stems # If there are stems for this group-any dependency not @@ -290,14 +314,14 @@ def __min_version(): if group_stems and not matching_stems: stems = ", ".join(p for p in group_stems) errors.append( - _("Group dependency on one of {0} not " - "met").format(stems)) + _("Group dependency on one of {0} not " "met").format(stems) + ) return errors, warnings, info elif ctype == "require-any": for ifmri, rpfmri in zip(installed_versions, pfmris): - e = self.__check_installed(image, ifmri, - rpfmri, None, True, - ctype) + e = self.__check_installed( + image, ifmri, rpfmri, None, True, ctype + ) if ifmri and not e: # this one is present and happy return [], [], [] @@ -305,13 +329,14 @@ def __min_version(): errors.extend(e) if not errors: # none was installed errors.append( - _("Required dependency on one of " - "{0} not met"). 
- format(", ".join((str(p) - for p in pfmris)))) + _("Required dependency on one of " "{0} not met").format( + ", ".join((str(p) for p in pfmris)) + ) + ) return errors, warnings, info elif ctype == "origin" and pfmri.pkg_name.startswith( - "feature/firmware/"): + "feature/firmware/" + ): ok, reason = Firmware().check_firmware(self, pfmri.pkg_name) if ok: return [], [], [] @@ -320,14 +345,16 @@ def __min_version(): # do checking for other dependency types - errors.extend(self.__check_installed(image, - installed_version, - min_fmri, max_fmri, - required, ctype)) + errors.extend( + self.__check_installed( + image, installed_version, min_fmri, max_fmri, required, ctype + ) + ) if required and not installed_version: - errors.append(_("Required dependency {0} is not " - "installed").format(pfmri)) + errors.append( + _("Required dependency {0} is not " "installed").format(pfmri) + ) # cannot verify origin since it applys to upgrade # operation, not final state @@ -361,9 +388,7 @@ def generate_indices(self): # from 'pkg://' upto the first slash. p = pat.sub("", p) # Note that this creates a directory hierarchy! - inds.append( - ("depend", ctype, p, None) - ) + inds.append(("depend", ctype, p, None)) if "@" in p: stem = p.split("@")[0] @@ -434,7 +459,7 @@ def grow(a, b, rem_values, force_nl=False): # Note this length comparison doesn't include # the space used to append the second part of # the string. - if (len(a) - lastnl + len(b) < max_len): + if len(a) - lastnl + len(b) < max_len: return a + " " + b return a + JOIN_TOK + b @@ -444,17 +469,14 @@ def astr(aout): first_line = True # Total number of remaining attribute values to output. - rem_count = sum(len(act.attrlist(k)) - for k in act.attrs) + rem_count = sum(len(act.attrlist(k)) for k in act.attrs) # Now build the action output string an attribute at a # time. - for k, v in sorted(six.iteritems(act.attrs), - key=key_func): + for k, v in sorted(six.iteritems(act.attrs), key=key_func): # Newline breaks are only forced when there is # more than one value for an attribute. - if not (isinstance(v, list) or - isinstance(v, set)): + if not (isinstance(v, list) or isinstance(v, set)): nv = [v] use_force_nl = False else: @@ -462,19 +484,19 @@ def astr(aout): use_force_nl = True for lmt in sorted(nv): - force_nl = use_force_nl and \ - k.startswith("pkg.debug") - aout = grow(aout, "=".join( - (k, - generic.quote_attr_value( - lmt))), + force_nl = use_force_nl and k.startswith("pkg.debug") + aout = grow( + aout, + "=".join((k, generic.quote_attr_value(lmt))), rem_count, - force_nl=force_nl) + force_nl=force_nl, + ) # Must be done for each value. if first_line and JOIN_TOK in aout: first_line = False rem_count -= 1 return aout + return astr(out) def validate(self, fmri=None): @@ -493,29 +515,48 @@ def validate(self, fmri=None): if dtype == "conditional": required_attrs.append("predicate") - single_attrs = ["predicate", "root-image", "ignore-check", - "type"] + single_attrs = ["predicate", "root-image", "ignore-check", "type"] if dtype not in ("group-any", "require-any"): # Other dependency types only expect a single value. single_attrs.append("fmri") errors = generic.Action._validate( - self, fmri=fmri, raise_errors=False, - required_attrs=required_attrs, single_attrs=single_attrs) - - if (isinstance(dtype, six.string_types) and - dtype not in known_types): - errors.append(("type", - _("Unknown type '{0}' in depend action"). 
- format(self.attrs["type"]))) + self, + fmri=fmri, + raise_errors=False, + required_attrs=required_attrs, + single_attrs=single_attrs, + ) + + if isinstance(dtype, six.string_types) and dtype not in known_types: + errors.append( + ( + "type", + _("Unknown type '{0}' in depend action").format( + self.attrs["type"] + ), + ) + ) if "predicate" in self.attrs and dtype != "conditional": - errors.append(("predicate", - _("a predicate may only be specified " - "for conditional dependencies"))) + errors.append( + ( + "predicate", + _( + "a predicate may only be specified " + "for conditional dependencies" + ), + ) + ) if "root-image" in self.attrs and dtype != "origin": - errors.append(("root-image", - _("the root-image attribute is only " - "valid for origin dependencies"))) + errors.append( + ( + "root-image", + _( + "the root-image attribute is only " + "valid for origin dependencies" + ), + ) + ) # Logic here intentionally treats 'predicate' and 'fmri' as # having multiple values for simplicity. @@ -523,24 +564,25 @@ def validate(self, fmri=None): for f in self.attrlist(attr): try: pkg.fmri.PkgFmri(f) - except (pkg.version.VersionError, - pkg.fmri.FmriError) as e: + except (pkg.version.VersionError, pkg.fmri.FmriError) as e: if attr == "fmri" and f == "__TBD": # pkgdepend uses this special # value. continue - errors.append(( - attr, - _("invalid {attr} value " - "'{value}': {error}"). - format(attr=attr, - value=f, - error=str(e)))) + errors.append( + ( + attr, + _( + "invalid {attr} value " "'{value}': {error}" + ).format(attr=attr, value=f, error=str(e)), + ) + ) if errors: raise pkg.actions.InvalidActionAttributesError( - self, - errors, fmri=fmri) + self, errors, fmri=fmri + ) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/directory.py b/src/modules/actions/directory.py index 698fb8cd5..0f420476d 100644 --- a/src/modules/actions/directory.py +++ b/src/modules/actions/directory.py @@ -37,262 +37,281 @@ import pkg.client.api_errors as apx import stat + class DirectoryAction(generic.Action): - """Class representing a directory-type packaging object.""" - - __slots__ = [] - - name = "dir" - key_attr = "path" - unique_attrs = "path", "mode", "owner", "group" - globally_identical = True - refcountable = True - namespace_group = "path" - ordinality = generic._orderdict[name] - - def compare(self, other): - return (self.attrs["path"] > other.attrs["path"]) - \ - (self.attrs["path"] < other.attrs["path"]) - - def differences(self, other): - """Returns a list of attributes that have different values - between 'other' and 'self'. This differs from the generic - Action's differences() method in that it normalizes the 'mode' - attribute so that, say, '0755' and '755' are treated as - identical.""" - - diffs = generic.Action.differences(self, other) - - if "mode" in diffs and \ - int(self.attrs.get("mode", "0"), 8) == int(other.attrs.get("mode", "0"), 8): - diffs.remove("mode") - - return diffs - - def directory_references(self): - return [os.path.normpath(self.attrs["path"])] - - def __create_directory(self, pkgplan, path, mode, **kwargs): - """Create a directory.""" - - try: - self.makedirs(path, mode=mode, - fmri=pkgplan.destination_fmri, **kwargs) - except OSError as e: - if e.filename != path: - # makedirs failed for some component - # of the path. 
- raise - - fs = os.lstat(path) - fs_mode = stat.S_IFMT(fs.st_mode) - if e.errno == errno.EROFS: - # Treat EROFS like EEXIST if both are - # applicable, since we'll end up with - # EROFS instead. - if stat.S_ISDIR(fs_mode): - return - raise - elif e.errno != errno.EEXIST: - raise - - if stat.S_ISLNK(fs_mode): - # User has replaced directory with a - # link, or a package has been poorly - # implemented. It isn't safe to - # simply re-create the directory as - # that won't restore the files that - # are supposed to be contained within. - err_txt = _("Unable to create " - "directory {0}; it has been " - "replaced with a link. To " - "continue, please remove the " - "link or restore the directory " - "to its original location and " - "try again.").format(path) - raise apx.ActionExecutionError( - self, details=err_txt, error=e, - fmri=pkgplan.destination_fmri) - elif stat.S_ISREG(fs_mode): - # User has replaced directory with a - # file, or a package has been poorly - # implemented. Salvage what's there, - # and drive on. - pkgplan.salvage(path) - os.mkdir(path, mode) - elif stat.S_ISDIR(fs_mode): - # The directory already exists, but - # ensure that the mode matches what's - # expected. - os.chmod(path, mode) - - def install(self, pkgplan, orig): - """Client-side method that installs a directory.""" - - mode = None - try: - mode = int(self.attrs.get("mode", None), 8) - except (TypeError, ValueError): - # Mode isn't valid, so let validate raise a more - # informative error. - self.validate(fmri=pkgplan.destination_fmri) - - omode = oowner = ogroup = None - owner, group = self.get_fsobj_uid_gid(pkgplan, - pkgplan.destination_fmri) - if orig: - try: - omode = int(orig.attrs.get("mode", None), 8) - except (TypeError, ValueError): - # Mode isn't valid, so let validate raise a more - # informative error. - orig.validate(fmri=pkgplan.origin_fmri) - oowner, ogroup = orig.get_fsobj_uid_gid(pkgplan, - pkgplan.origin_fmri) - - path = self.get_installed_path(pkgplan.image.get_root()) - - # Don't allow installation through symlinks. - self.fsobj_checkpath(pkgplan, path) - - # XXX Hack! (See below comment.) - if not portable.is_admin(): - mode |= stat.S_IWUSR - - if not orig: - self.__create_directory(pkgplan, path, mode) - - # The downside of chmodding the directory is that as a non-root - # user, if we set perms u-w, we won't be able to put anything in - # it, which is often not what we want at install time. We save - # the chmods for the postinstall phase, but it's always possible - # that a later package install will want to place something in - # this directory and then be unable to. So perhaps we need to - # (in all action types) chmod the parent directory to u+w on - # failure, and chmod it back aftwards. The trick is to - # recognize failure due to missing file_dac_write in contrast to - # other failures. Or can we require that everyone simply have - # file_dac_write who wants to use the tools. Probably not. - elif mode != omode: - try: - os.chmod(path, mode) - except Exception as e: - if e.errno != errno.EPERM and e.errno != \ - errno.ENOSYS: - # Assume chmod failed due to a - # recoverable error. - self.__create_directory(pkgplan, path, - mode) - omode = oowner = ogroup = None - - # if we're salvaging contents, move 'em now. - # directories with "salvage-from" attribute - # set will scavenge any available contents - # that matches specified directory and - # move it underneath itself on install or update. 
- # This is here to support directory rename - # when old directory has unpackaged contents, or - # consolidation of content from older directories. - for salvage_from in self.attrlist("salvage-from"): - pkgplan.salvage_from(salvage_from, path) - - if not orig or oowner != owner or ogroup != group: - try: - portable.chown(path, owner, group) - except OSError as e: - if e.errno != errno.EPERM and \ - e.errno != errno.ENOSYS: - # Assume chown failed due to a - # recoverable error. - self.__create_directory(pkgplan, path, - mode, uid=owner, gid=group) - - def verify(self, img, **args): - """Returns a tuple of lists of the form (errors, warnings, - info). The error list will be empty if the action has been - correctly installed in the given image.""" - - lstat, errors, warnings, info, abort = \ - self.verify_fsobj_common(img, stat.S_IFDIR) - return errors, warnings, info - - def remove(self, pkgplan): - path = self.get_installed_path(pkgplan.image.get_root()) - try: - os.rmdir(path) - except OSError as e: - if e.errno == errno.EINVAL and path == '/': - # This is ok, illumos will return EINVAL - # for attempts to remove / - pass - elif e.errno == errno.ENOENT: - pass - elif e.errno in (errno.EEXIST, errno.ENOTEMPTY): - # Cannot remove directory since it's - # not empty. - pkgplan.salvage(path) - elif e.errno == errno.ENOTDIR: - # Either the user or another package has changed - # this directory into a link or file. Salvage - # what's there and drive on. - pkgplan.salvage(path) - elif e.errno == errno.EBUSY and os.path.ismount(path): - # User has replaced directory with mountpoint, - # or a package has been poorly implemented. - if not self.attrs.get("implicit"): - err_txt = _("Unable to remove {0}; it is " - "in use as a mountpoint. To " - "continue, please unmount the " - "filesystem at the target " - "location and try again.").format( - path) - raise apx.ActionExecutionError(self, - details=err_txt, error=e, - fmri=pkgplan.origin_fmri) - elif e.errno == errno.EBUSY: - # os.path.ismount() is broken for lofs - # filesystems, so give a more generic - # error. - if not self.attrs.get("implicit"): - err_txt = _("Unable to remove {0}; it " - "is in use by the system, another " - "process, or as a " - "mountpoint.").format(path) - raise apx.ActionExecutionError(self, - details=err_txt, error=e, - fmri=pkgplan.origin_fmri) - elif e.errno != errno.EACCES: # this happens on Windows - raise - - def generate_indices(self): - """Generates the indices needed by the search dictionary. See - generic.py for a more detailed explanation.""" - - return [ - (self.name, "basename", - os.path.basename(self.attrs["path"].rstrip(os.path.sep)), - None), - (self.name, "path", os.path.sep + self.attrs["path"], - None) - ] - - def validate(self, fmri=None): - """Performs additional validation of action attributes that - for performance or other reasons cannot or should not be done - during Action object creation. An ActionError exception (or - subclass of) will be raised if any attributes are not valid. - This is primarily intended for use during publication or during - error handling to provide additional diagonostics. 
- - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action.""" - - errors = generic.Action._validate(self, fmri=fmri, - raise_errors=False, required_attrs=("owner", "group")) - errors.extend(self._validate_fsobj_common()) - if errors: - raise pkg.actions.InvalidActionAttributesError(self, - errors, fmri=fmri) + """Class representing a directory-type packaging object.""" + + __slots__ = [] + + name = "dir" + key_attr = "path" + unique_attrs = "path", "mode", "owner", "group" + globally_identical = True + refcountable = True + namespace_group = "path" + ordinality = generic._orderdict[name] + + def compare(self, other): + return (self.attrs["path"] > other.attrs["path"]) - ( + self.attrs["path"] < other.attrs["path"] + ) + + def differences(self, other): + """Returns a list of attributes that have different values + between 'other' and 'self'. This differs from the generic + Action's differences() method in that it normalizes the 'mode' + attribute so that, say, '0755' and '755' are treated as + identical.""" + + diffs = generic.Action.differences(self, other) + + if "mode" in diffs and int(self.attrs.get("mode", "0"), 8) == int( + other.attrs.get("mode", "0"), 8 + ): + diffs.remove("mode") + + return diffs + + def directory_references(self): + return [os.path.normpath(self.attrs["path"])] + + def __create_directory(self, pkgplan, path, mode, **kwargs): + """Create a directory.""" + + try: + self.makedirs( + path, mode=mode, fmri=pkgplan.destination_fmri, **kwargs + ) + except OSError as e: + if e.filename != path: + # makedirs failed for some component + # of the path. + raise + + fs = os.lstat(path) + fs_mode = stat.S_IFMT(fs.st_mode) + if e.errno == errno.EROFS: + # Treat EROFS like EEXIST if both are + # applicable, since we'll end up with + # EROFS instead. + if stat.S_ISDIR(fs_mode): + return + raise + elif e.errno != errno.EEXIST: + raise + + if stat.S_ISLNK(fs_mode): + # User has replaced directory with a + # link, or a package has been poorly + # implemented. It isn't safe to + # simply re-create the directory as + # that won't restore the files that + # are supposed to be contained within. + err_txt = _( + "Unable to create " + "directory {0}; it has been " + "replaced with a link. To " + "continue, please remove the " + "link or restore the directory " + "to its original location and " + "try again." + ).format(path) + raise apx.ActionExecutionError( + self, + details=err_txt, + error=e, + fmri=pkgplan.destination_fmri, + ) + elif stat.S_ISREG(fs_mode): + # User has replaced directory with a + # file, or a package has been poorly + # implemented. Salvage what's there, + # and drive on. + pkgplan.salvage(path) + os.mkdir(path, mode) + elif stat.S_ISDIR(fs_mode): + # The directory already exists, but + # ensure that the mode matches what's + # expected. + os.chmod(path, mode) + + def install(self, pkgplan, orig): + """Client-side method that installs a directory.""" + + mode = None + try: + mode = int(self.attrs.get("mode", None), 8) + except (TypeError, ValueError): + # Mode isn't valid, so let validate raise a more + # informative error. + self.validate(fmri=pkgplan.destination_fmri) + + omode = oowner = ogroup = None + owner, group = self.get_fsobj_uid_gid(pkgplan, pkgplan.destination_fmri) + if orig: + try: + omode = int(orig.attrs.get("mode", None), 8) + except (TypeError, ValueError): + # Mode isn't valid, so let validate raise a more + # informative error. 
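
The differences() method above normalises the mode attribute before comparing, so that "0755" and "755" are treated as the same value. A minimal sketch of that normalisation, with same_mode as a hypothetical helper name:

def same_mode(a, b):
    # Compare mode strings as base-8 integers rather than as raw text.
    return int(a, 8) == int(b, 8)

assert same_mode("0755", "755")
assert not same_mode("0750", "755")
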
+ orig.validate(fmri=pkgplan.origin_fmri) + oowner, ogroup = orig.get_fsobj_uid_gid( + pkgplan, pkgplan.origin_fmri + ) + + path = self.get_installed_path(pkgplan.image.get_root()) + + # Don't allow installation through symlinks. + self.fsobj_checkpath(pkgplan, path) + + # XXX Hack! (See below comment.) + if not portable.is_admin(): + mode |= stat.S_IWUSR + + if not orig: + self.__create_directory(pkgplan, path, mode) + + # The downside of chmodding the directory is that as a non-root + # user, if we set perms u-w, we won't be able to put anything in + # it, which is often not what we want at install time. We save + # the chmods for the postinstall phase, but it's always possible + # that a later package install will want to place something in + # this directory and then be unable to. So perhaps we need to + # (in all action types) chmod the parent directory to u+w on + # failure, and chmod it back aftwards. The trick is to + # recognize failure due to missing file_dac_write in contrast to + # other failures. Or can we require that everyone simply have + # file_dac_write who wants to use the tools. Probably not. + elif mode != omode: + try: + os.chmod(path, mode) + except Exception as e: + if e.errno != errno.EPERM and e.errno != errno.ENOSYS: + # Assume chmod failed due to a + # recoverable error. + self.__create_directory(pkgplan, path, mode) + omode = oowner = ogroup = None + + # if we're salvaging contents, move 'em now. + # directories with "salvage-from" attribute + # set will scavenge any available contents + # that matches specified directory and + # move it underneath itself on install or update. + # This is here to support directory rename + # when old directory has unpackaged contents, or + # consolidation of content from older directories. + for salvage_from in self.attrlist("salvage-from"): + pkgplan.salvage_from(salvage_from, path) + + if not orig or oowner != owner or ogroup != group: + try: + portable.chown(path, owner, group) + except OSError as e: + if e.errno != errno.EPERM and e.errno != errno.ENOSYS: + # Assume chown failed due to a + # recoverable error. + self.__create_directory( + pkgplan, path, mode, uid=owner, gid=group + ) + + def verify(self, img, **args): + """Returns a tuple of lists of the form (errors, warnings, + info). The error list will be empty if the action has been + correctly installed in the given image.""" + + lstat, errors, warnings, info, abort = self.verify_fsobj_common( + img, stat.S_IFDIR + ) + return errors, warnings, info + + def remove(self, pkgplan): + path = self.get_installed_path(pkgplan.image.get_root()) + try: + os.rmdir(path) + except OSError as e: + if e.errno == errno.EINVAL and path == "/": + # This is ok, illumos will return EINVAL + # for attempts to remove / + pass + elif e.errno == errno.ENOENT: + pass + elif e.errno in (errno.EEXIST, errno.ENOTEMPTY): + # Cannot remove directory since it's + # not empty. + pkgplan.salvage(path) + elif e.errno == errno.ENOTDIR: + # Either the user or another package has changed + # this directory into a link or file. Salvage + # what's there and drive on. + pkgplan.salvage(path) + elif e.errno == errno.EBUSY and os.path.ismount(path): + # User has replaced directory with mountpoint, + # or a package has been poorly implemented. + if not self.attrs.get("implicit"): + err_txt = _( + "Unable to remove {0}; it is " + "in use as a mountpoint. To " + "continue, please unmount the " + "filesystem at the target " + "location and try again." 
+ ).format(path) + raise apx.ActionExecutionError( + self, details=err_txt, error=e, fmri=pkgplan.origin_fmri + ) + elif e.errno == errno.EBUSY: + # os.path.ismount() is broken for lofs + # filesystems, so give a more generic + # error. + if not self.attrs.get("implicit"): + err_txt = _( + "Unable to remove {0}; it " + "is in use by the system, another " + "process, or as a " + "mountpoint." + ).format(path) + raise apx.ActionExecutionError( + self, details=err_txt, error=e, fmri=pkgplan.origin_fmri + ) + elif e.errno != errno.EACCES: # this happens on Windows + raise + + def generate_indices(self): + """Generates the indices needed by the search dictionary. See + generic.py for a more detailed explanation.""" + + return [ + ( + self.name, + "basename", + os.path.basename(self.attrs["path"].rstrip(os.path.sep)), + None, + ), + (self.name, "path", os.path.sep + self.attrs["path"], None), + ] + + def validate(self, fmri=None): + """Performs additional validation of action attributes that + for performance or other reasons cannot or should not be done + during Action object creation. An ActionError exception (or + subclass of) will be raised if any attributes are not valid. + This is primarily intended for use during publication or during + error handling to provide additional diagonostics. + + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action.""" + + errors = generic.Action._validate( + self, + fmri=fmri, + raise_errors=False, + required_attrs=("owner", "group"), + ) + errors.extend(self._validate_fsobj_common()) + if errors: + raise pkg.actions.InvalidActionAttributesError( + self, errors, fmri=fmri + ) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/driver.py b/src/modules/actions/driver.py index e4054b116..39502759e 100644 --- a/src/modules/actions/driver.py +++ b/src/modules/actions/driver.py @@ -40,232 +40,241 @@ import pkg.pkgsubprocess as subprocess from pkg.client.debugvalues import DebugValues + class DriverAction(generic.Action): - """Class representing a driver-type packaging object.""" - - __slots__ = [] - - name = "driver" - key_attr = "name" - globally_identical = True - ordinality = generic._orderdict[name] - - usr_sbin = None - add_drv = None - rem_drv = None - update_drv = None - - def __init__(self, data=None, **attrs): - generic.Action.__init__(self, data, **attrs) - - if not self.__class__.usr_sbin: - self.__usr_sbin_init() - - # - # Clean up clone_perms. This attribute may been specified either as: - # - # or - # - # - # In the latter case, the is assumed to be - # the same as the driver name. Correct any such instances - # here so that there is only one form, so that we can cleanly - # compare, verify, etc. - # - if not self.attrlist("clone_perms"): - return - - new_cloneperms = [] - for cp in self.attrlist("clone_perms"): - # If we're given three fields, assume the minor node - # name is the same as the driver name. 
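
A hypothetical stand-alone version of the clone_perms clean-up described in the comment above: a three-field entry omits the minor-node name, so the driver name is prepended, giving every entry the same four-field shape.

def normalize_clone_perms(driver, entries):
    out = []
    for cp in entries:
        if len(cp.split()) == 3:
            # Minor node name omitted; assume it matches the driver.
            out.append(driver + " " + cp)
        else:
            out.append(cp)
    return out

# e.g. normalize_clone_perms("audio", ["0666 root sys"])
#      -> ["audio 0666 root sys"]
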
- if len(cp.split()) == 3: - new_cloneperms.append( - self.attrs["name"] + " " + cp) - else: - new_cloneperms.append(cp) - if len(new_cloneperms) == 1: - self.attrs["clone_perms"] = new_cloneperms[0] - else: - self.attrs["clone_perms"] = new_cloneperms - - def compare(self, other): - """Compare with other driver instance; defined to force - clone driver to be removed last""" - - ret = (self.attrs["name"] > other.attrs["name"]) - \ - (self.attrs["name"] < other.attrs["name"]) - - if ret == 0: - return 0 - - if self.attrs["name"] == "clone": - return -1 - elif other.attrs["name"] == "clone": - return 1 - return ret - - @staticmethod - def __usr_sbin_init(): - """Initialize paths to device management commands that we will - execute when handling package driver actions""" - - usr_sbin = DebugValues.get("driver-cmd-dir", "/usr/sbin") + "/" - DriverAction.usr_sbin = usr_sbin - DriverAction.add_drv = usr_sbin + "add_drv" - DriverAction.rem_drv = usr_sbin + "rem_drv" - DriverAction.update_drv = usr_sbin + "update_drv" - - @staticmethod - def __call(args, fmt, fmtargs): - proc = subprocess.Popen(args, stdout = subprocess.PIPE, - stderr = subprocess.STDOUT) - buf = proc.stdout.read() - ret = proc.wait() - - if ret != 0: - fmtargs["retcode"] = ret - # XXX Module printing - print() - fmt += " failed with return code {retcode}" - print(_(fmt).format(**fmtargs)) - print(("command run was:"), " ".join(args)) - print("command output was:") - print("-" * 60) - print(buf, end=" ") - print("-" * 60) - - @staticmethod - def remove_aliases(driver_name, aliases, image): - if not DriverAction.update_drv: - DriverAction.__usr_sbin_init() - # This should not happen in non-global zones. - if image.is_zone(): - return - rem_base = (DriverAction.update_drv, "-b", image.get_root(), "-d") - for i in aliases: - args = rem_base + ("-i", '{0}'.format(i), driver_name) - DriverAction.__call(args, "driver ({name}) upgrade (removal " - "of alias '{alias}')", - {"name": driver_name, "alias": i}) - - @classmethod - def __activate_drivers(cls): - cls.__call([cls.usr_sbin + "devfsadm", "-u"], - "Driver activation failed", {}) - - def install(self, pkgplan, orig): - image = pkgplan.image - - if image.is_zone(): - return - - # See if it's installed - major = False - try: - for fields in DriverAction.__gen_read_binding_file( - image, "etc/name_to_major", minfields=2, - maxfields=2): - if fields[0] == self.attrs["name"]: - major = True - break - except IOError: - pass - - # Iterate through driver_aliases and the driver actions of the - # target image to see if this driver will bind to an alias that - # some other driver will bind to. If we find that it will, and - # it's unclaimed by any other driver action, then we want to - # comment out (for easier recovery) the entry from the file. If - # it's claimed by another driver action, then we should fail - # installation, as if two driver actions tried to deliver the - # same driver. If it's unclaimed, but appears to belong to a - # driver of the same name as this one, we'll safely slurp it in - # with __get_image_data(). - # - # XXX This check should be done in imageplan preexecution. 
- - file_db = {} - alias_lines = {} - alias_conflict = None - lines = [] - try: - for fields in DriverAction.__gen_read_binding_file( - image, "etc/driver_aliases", raw=True, minfields=2, - maxfields=2): - if isinstance(fields, str): - lines.append(fields + "\n") - continue - - name, alias = fields - file_db.setdefault(name, set()).add(alias) - alias_lines.setdefault(alias, []).append( - (name, len(lines))) - lines.append("{0} \"{1}\"\n".format(*fields)) - except IOError: - pass - - a2d = getattr(image.imageplan, "alias_to_driver", None) - driver_actions = image.imageplan.get_actions("driver") - if a2d is None: - # For all the drivers we know will be in the final - # image, remove them from the db we made by slurping in - # the aliases file. What's left is what we should be - # checking for dups against, along with the rest of the - # drivers. - for name in driver_actions: - file_db.pop(name, None) - - # Build a mapping of aliases to driver names based on - # the target image's driver actions. - a2d = {} - for alias, name in ( - (a, n) - for n, act_list in six.iteritems(driver_actions) - for act in act_list - for a in act.attrlist("alias") - ): - a2d.setdefault(alias, set()).add(name) - - # Enhance that mapping with data from driver_aliases. - for name, aliases in six.iteritems(file_db): - for alias in aliases: - a2d.setdefault(alias, set()).add(name) - - # Stash this on the imageplan so we don't have to do the - # work again. - image.imageplan.alias_to_driver = a2d - - for alias in self.attrlist("alias"): - names = a2d[alias] - assert self.attrs["name"] in names - if len(names) > 1: - alias_conflict = alias - break - - if alias_conflict: - be_name = getattr(image.bootenv, "be_name_clone", None) - name, line = alias_lines[alias_conflict][0] - errdict = { - "new": self.attrs["name"], - "old": name, - "alias": alias_conflict, - "line": line, - "be": be_name, - "imgroot": image.get_root() - } - if name in driver_actions: - raise RuntimeError("\ + """Class representing a driver-type packaging object.""" + + __slots__ = [] + + name = "driver" + key_attr = "name" + globally_identical = True + ordinality = generic._orderdict[name] + + usr_sbin = None + add_drv = None + rem_drv = None + update_drv = None + + def __init__(self, data=None, **attrs): + generic.Action.__init__(self, data, **attrs) + + if not self.__class__.usr_sbin: + self.__usr_sbin_init() + + # + # Clean up clone_perms. This attribute may been specified either as: + # + # or + # + # + # In the latter case, the is assumed to be + # the same as the driver name. Correct any such instances + # here so that there is only one form, so that we can cleanly + # compare, verify, etc. + # + if not self.attrlist("clone_perms"): + return + + new_cloneperms = [] + for cp in self.attrlist("clone_perms"): + # If we're given three fields, assume the minor node + # name is the same as the driver name. 
+ if len(cp.split()) == 3: + new_cloneperms.append(self.attrs["name"] + " " + cp) + else: + new_cloneperms.append(cp) + if len(new_cloneperms) == 1: + self.attrs["clone_perms"] = new_cloneperms[0] + else: + self.attrs["clone_perms"] = new_cloneperms + + def compare(self, other): + """Compare with other driver instance; defined to force + clone driver to be removed last""" + + ret = (self.attrs["name"] > other.attrs["name"]) - ( + self.attrs["name"] < other.attrs["name"] + ) + + if ret == 0: + return 0 + + if self.attrs["name"] == "clone": + return -1 + elif other.attrs["name"] == "clone": + return 1 + return ret + + @staticmethod + def __usr_sbin_init(): + """Initialize paths to device management commands that we will + execute when handling package driver actions""" + + usr_sbin = DebugValues.get("driver-cmd-dir", "/usr/sbin") + "/" + DriverAction.usr_sbin = usr_sbin + DriverAction.add_drv = usr_sbin + "add_drv" + DriverAction.rem_drv = usr_sbin + "rem_drv" + DriverAction.update_drv = usr_sbin + "update_drv" + + @staticmethod + def __call(args, fmt, fmtargs): + proc = subprocess.Popen( + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT + ) + buf = proc.stdout.read() + ret = proc.wait() + + if ret != 0: + fmtargs["retcode"] = ret + # XXX Module printing + print() + fmt += " failed with return code {retcode}" + print(_(fmt).format(**fmtargs)) + print(("command run was:"), " ".join(args)) + print("command output was:") + print("-" * 60) + print(buf, end=" ") + print("-" * 60) + + @staticmethod + def remove_aliases(driver_name, aliases, image): + if not DriverAction.update_drv: + DriverAction.__usr_sbin_init() + # This should not happen in non-global zones. + if image.is_zone(): + return + rem_base = (DriverAction.update_drv, "-b", image.get_root(), "-d") + for i in aliases: + args = rem_base + ("-i", "{0}".format(i), driver_name) + DriverAction.__call( + args, + "driver ({name}) upgrade (removal " "of alias '{alias}')", + {"name": driver_name, "alias": i}, + ) + + @classmethod + def __activate_drivers(cls): + cls.__call( + [cls.usr_sbin + "devfsadm", "-u"], "Driver activation failed", {} + ) + + def install(self, pkgplan, orig): + image = pkgplan.image + + if image.is_zone(): + return + + # See if it's installed + major = False + try: + for fields in DriverAction.__gen_read_binding_file( + image, "etc/name_to_major", minfields=2, maxfields=2 + ): + if fields[0] == self.attrs["name"]: + major = True + break + except IOError: + pass + + # Iterate through driver_aliases and the driver actions of the + # target image to see if this driver will bind to an alias that + # some other driver will bind to. If we find that it will, and + # it's unclaimed by any other driver action, then we want to + # comment out (for easier recovery) the entry from the file. If + # it's claimed by another driver action, then we should fail + # installation, as if two driver actions tried to deliver the + # same driver. If it's unclaimed, but appears to belong to a + # driver of the same name as this one, we'll safely slurp it in + # with __get_image_data(). + # + # XXX This check should be done in imageplan preexecution. 
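
A simplified sketch of the alias bookkeeping that the code below performs: build a mapping from alias to the set of drivers claiming it, then flag any alias claimed by more than one driver. Here driver_aliases is a plain dict of {driver_name: [aliases]} standing in for the combined driver-action and /etc/driver_aliases data.

def find_alias_conflicts(driver_aliases):
    a2d = {}
    for name, aliases in driver_aliases.items():
        for alias in aliases:
            a2d.setdefault(alias, set()).add(name)
    # Any alias mapped to more than one driver is a conflict.
    return {a: names for a, names in a2d.items() if len(names) > 1}

# e.g. find_alias_conflicts({"foo": ["pci1234,1"], "bar": ["pci1234,1"]})
#      -> {"pci1234,1": {"foo", "bar"}}
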
+ + file_db = {} + alias_lines = {} + alias_conflict = None + lines = [] + try: + for fields in DriverAction.__gen_read_binding_file( + image, "etc/driver_aliases", raw=True, minfields=2, maxfields=2 + ): + if isinstance(fields, str): + lines.append(fields + "\n") + continue + + name, alias = fields + file_db.setdefault(name, set()).add(alias) + alias_lines.setdefault(alias, []).append((name, len(lines))) + lines.append('{0} "{1}"\n'.format(*fields)) + except IOError: + pass + + a2d = getattr(image.imageplan, "alias_to_driver", None) + driver_actions = image.imageplan.get_actions("driver") + if a2d is None: + # For all the drivers we know will be in the final + # image, remove them from the db we made by slurping in + # the aliases file. What's left is what we should be + # checking for dups against, along with the rest of the + # drivers. + for name in driver_actions: + file_db.pop(name, None) + + # Build a mapping of aliases to driver names based on + # the target image's driver actions. + a2d = {} + for alias, name in ( + (a, n) + for n, act_list in six.iteritems(driver_actions) + for act in act_list + for a in act.attrlist("alias") + ): + a2d.setdefault(alias, set()).add(name) + + # Enhance that mapping with data from driver_aliases. + for name, aliases in six.iteritems(file_db): + for alias in aliases: + a2d.setdefault(alias, set()).add(name) + + # Stash this on the imageplan so we don't have to do the + # work again. + image.imageplan.alias_to_driver = a2d + + for alias in self.attrlist("alias"): + names = a2d[alias] + assert self.attrs["name"] in names + if len(names) > 1: + alias_conflict = alias + break + + if alias_conflict: + be_name = getattr(image.bootenv, "be_name_clone", None) + name, line = alias_lines[alias_conflict][0] + errdict = { + "new": self.attrs["name"], + "old": name, + "alias": alias_conflict, + "line": line, + "be": be_name, + "imgroot": image.get_root(), + } + if name in driver_actions: + raise RuntimeError( + "\ The '{new}' driver shares the alias '{alias}' with the '{old}'\n\ driver; both drivers cannot be installed simultaneously. Please remove\n\ the package delivering '{old}' or ensure that the package delivering\n\ -'{new}' will not be installed, and try the operation again.".format(**errdict)) - else: - comment = "# pkg(7): " - lines[line] = comment + lines[line] - # XXX Module printing - if be_name: - print("\ +'{new}' will not be installed, and try the operation again.".format( + **errdict + ) + ) + else: + comment = "# pkg(7): " + lines[line] = comment + lines[line] + # XXX Module printing + if be_name: + print( + "\ The '{new}' driver shares the alias '{alias}' with the '{old}'\n\ driver, but the system cannot determine how the latter was delivered.\n\ Its entry on line {line:d} in /etc/driver_aliases has been commented\n\ @@ -274,734 +283,837 @@ def install(self, pkgplan, orig): as well as removing line {line:d} from /etc/driver_aliases or, before\n\ rebooting, mounting the '{be}' boot environment and running\n\ 'rem_drv -b {old}' and removing line {line:d} from\n\ -/etc/driver_aliases.".format(**errdict)) - else: - print("\ +/etc/driver_aliases.".format( + **errdict + ) + ) + else: + print( + "\ The '{new}' driver shares the alias '{alias}' with the '{old}'\n\ driver, but the system cannot determine how the latter was delivered.\n\ Its entry on line {line:d} in /etc/driver_aliases has been commented\n\ out. 
If this driver is no longer needed, it may be removed by invoking\n\ 'rem_drv -b {imgroot} {old}' as well as removing line {line:d}\n\ -from {imgroot}/etc/driver_aliases.".format(**errdict)) - - dap = image.get_root() + "/etc/driver_aliases" - datd, datp = mkstemp(suffix=".driver_aliases", - dir=image.get_root() + "/etc") - f = os.fdopen(datd, "w") - f.writelines(lines) - f.close() - st = os.stat(dap) - os.chmod(datp, st.st_mode) - os.chown(datp, st.st_uid, st.st_gid) - os.rename(datp, dap) - - # In the case where the packaging system thinks the driver - # is installed and the driver database doesn't, do a fresh - # install instead of an update. If the system thinks the driver - # is installed but the packaging has no previous knowledge of - # it, read the driver files to construct what *should* have been - # there, and proceed. - # - # XXX Log that this occurred. - if major and not orig: - orig = self.__get_image_data(image, self.attrs["name"]) - elif orig and not major: - orig = None - - if orig: - return self.__update_install(image, orig) - - if image.is_liveroot(): - args = ( self.add_drv, "-u" ) - image.imageplan.add_actuator("install", - "activate-drivers", self.__activate_drivers) - else: - args = ( self.add_drv, "-n", "-b", image.get_root() ) - - if "alias" in self.attrs: - args += ( - "-i", - " ".join([ '"{0}"'.format(x) for x in self.attrlist("alias") ]) - ) - if "class" in self.attrs: - args += ( - "-c", - " ".join(self.attrlist("class")) - ) - if "perms" in self.attrs: - args += ( - "-m", - ",".join(self.attrlist("perms")) - ) - if "policy" in self.attrs: - args += ( - "-p", - " ".join(self.attrlist("policy")) - ) - if "privs" in self.attrs: - args += ( - "-P", - ",".join(self.attrlist("privs")) +from {imgroot}/etc/driver_aliases.".format( + **errdict ) + ) + + dap = image.get_root() + "/etc/driver_aliases" + datd, datp = mkstemp( + suffix=".driver_aliases", dir=image.get_root() + "/etc" + ) + f = os.fdopen(datd, "w") + f.writelines(lines) + f.close() + st = os.stat(dap) + os.chmod(datp, st.st_mode) + os.chown(datp, st.st_uid, st.st_gid) + os.rename(datp, dap) + + # In the case where the packaging system thinks the driver + # is installed and the driver database doesn't, do a fresh + # install instead of an update. If the system thinks the driver + # is installed but the packaging has no previous knowledge of + # it, read the driver files to construct what *should* have been + # there, and proceed. + # + # XXX Log that this occurred. 
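
The driver_aliases rewrite above follows a common atomic-replace pattern: write the new contents to a temporary file in the same directory, copy the original mode and ownership across, then rename() over the old file so readers never see a partially written file. A minimal sketch, assuming an absolute path and sufficient privileges for chown:

import os
from tempfile import mkstemp

def replace_file(path, lines):
    st = os.stat(path)
    fd, tmp = mkstemp(dir=os.path.dirname(path))
    with os.fdopen(fd, "w") as f:
        f.writelines(lines)
    os.chmod(tmp, st.st_mode)
    os.chown(tmp, st.st_uid, st.st_gid)
    # rename() is atomic on the same filesystem.
    os.rename(tmp, path)
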
+ if major and not orig: + orig = self.__get_image_data(image, self.attrs["name"]) + elif orig and not major: + orig = None + + if orig: + return self.__update_install(image, orig) + + if image.is_liveroot(): + args = (self.add_drv, "-u") + image.imageplan.add_actuator( + "install", "activate-drivers", self.__activate_drivers + ) + else: + args = (self.add_drv, "-n", "-b", image.get_root()) + + if "alias" in self.attrs: + args += ( + "-i", + " ".join(['"{0}"'.format(x) for x in self.attrlist("alias")]), + ) + if "class" in self.attrs: + args += ("-c", " ".join(self.attrlist("class"))) + if "perms" in self.attrs: + args += ("-m", ",".join(self.attrlist("perms"))) + if "policy" in self.attrs: + args += ("-p", " ".join(self.attrlist("policy"))) + if "privs" in self.attrs: + args += ("-P", ",".join(self.attrlist("privs"))) + + args += (self.attrs["name"],) + + self.__call( + args, "driver ({name}) install", {"name": self.attrs["name"]} + ) + + for cp in self.attrlist("clone_perms"): + args = ( + self.update_drv, + "-b", + image.get_root(), + "-a", + "-m", + cp, + "clone", + ) + self.__call( + args, + "driver ({name}) clone permission " "update", + {"name": self.attrs["name"]}, + ) + + if "devlink" in self.attrs: + dlp = os.path.normpath( + os.path.join(image.get_root(), "etc/devlink.tab") + ) + dlf = open(dlp) + dllines = dlf.readlines() + dlf.close() + st = os.stat(dlp) + + dlt, dltp = mkstemp( + suffix=".devlink.tab", dir=image.get_root() + "/etc" + ) + dlt = os.fdopen(dlt, "w") + dlt.writelines(dllines) + dlt.writelines( + ( + s.replace("\\t", "\t") + "\n" + for s in self.attrlist("devlink") + if s.replace("\\t", "\t") + "\n" not in dllines + ) + ) + dlt.close() + os.chmod(dltp, st.st_mode) + os.chown(dltp, st.st_uid, st.st_gid) + os.rename(dltp, dlp) + + def __update_install(self, image, orig): + add_base = (self.update_drv, "-b", image.get_root(), "-a") + rem_base = (self.update_drv, "-b", image.get_root(), "-d") + + add_alias = set(self.attrlist("alias")) - set(orig.attrlist("alias")) + + nclass = set(self.attrlist("class")) + oclass = set(orig.attrlist("class")) + add_class = nclass - oclass + rem_class = oclass - nclass + + nperms = set(self.attrlist("perms")) + operms = set(orig.attrlist("perms")) + add_perms = nperms - operms + rem_perms = operms - nperms + + nprivs = set(self.attrlist("privs")) + oprivs = set(orig.attrlist("privs")) + add_privs = nprivs - oprivs + rem_privs = oprivs - nprivs + + npolicy = set(self.attrlist("policy")) + opolicy = set(orig.attrlist("policy")) + add_policy = npolicy - opolicy + rem_policy = opolicy - npolicy + + nclone = set(self.attrlist("clone_perms")) + oclone = set(orig.attrlist("clone_perms")) + add_clone = nclone - oclone + rem_clone = oclone - nclone + + for i in add_alias: + args = add_base + ("-i", "{0}".format(i), self.attrs["name"]) + self.__call( + args, + "driver ({name}) upgrade (addition " "of alias '{alias}')", + {"name": self.attrs["name"], "alias": i}, + ) + + # Removing aliases has already been taken care of in + # imageplan.execute by calling remove_aliases. + + # update_drv doesn't do anything with classes, so we have to + # futz with driver_classes by hand. 
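
The update_classes() helper below edits etc/driver_classes directly: each line is "<driver>\t<class>", removal filters out matching lines, and addition appends new ones. A sketch of the same bookkeeping on plain lists of lines, using a filtering rewrite rather than deleting from the list while iterating over it; the file I/O and error handling of the real method are omitted.

def edit_classes(lines, driver, add_class, rem_class):
    kept = []
    for l in lines:
        arr = l.split()
        if len(arr) == 2 and arr[0] == driver and arr[1] in rem_class:
            continue                        # drop classes being removed
        kept.append(l)
    kept += ["{0}\t{1}\n".format(driver, c) for c in add_class]
    return kept
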
+ def update_classes(add_class, rem_class): + dcp = os.path.normpath( + os.path.join(image.get_root(), "etc/driver_classes") + ) + + try: + dcf = open(dcp, "r") + lines = dcf.readlines() + dcf.close() + except IOError as e: + e.args += ("reading",) + raise + + for i, l in enumerate(lines): + arr = l.split() + if ( + len(arr) == 2 + and arr[0] == self.attrs["name"] + and arr[1] in rem_class + ): + del lines[i] + + for i in add_class: + lines += ["{0}\t{1}\n".format(self.attrs["name"], i)] + + try: + dcf = open(dcp, "w") + dcf.writelines(lines) + dcf.close() + except IOError as e: + e.args += ("writing",) + raise + + if add_class or rem_class: + try: + update_classes(add_class, rem_class) + except IOError as e: + print( + "{0} ({1}) upgrade (classes modification) " + "failed {2} etc/driver_classes with error: " + "{3} ({4})".format( + self.name, + self.attrs["name"], + e.args[1], + e.args[0], + e.args[2], + ) + ) + print( + "tried to add {0} and remove {1}".format( + add_class, rem_class + ) + ) - args += ( self.attrs["name"], ) + # We have to update devlink.tab by hand, too. + def update_devlinks(): + dlp = os.path.normpath( + os.path.join(image.get_root(), "etc/devlink.tab") + ) + + try: + dlf = open(dlp) + lines = dlf.readlines() + dlf.close() + st = os.stat(dlp) + except IOError as e: + e.args += ("reading",) + raise + + olines = set(orig.attrlist("devlink")) + nlines = set(self.attrlist("devlink")) + add_lines = nlines - olines + rem_lines = olines - nlines + + missing_entries = [] + for line in rem_lines: + try: + lineno = lines.index(line.replace("\\t", "\t") + "\n") + except ValueError: + missing_entries.append(line.replace("\\t", "\t")) + continue + del lines[lineno] + + # Don't put in duplicates. Because there's no way to + # tell what driver owns what line, this means that if + # two drivers try to own the same line, one of them will + # be unhappy if the other is uninstalled. So don't do + # that. + lines.extend( + ( + s.replace("\\t", "\t") + "\n" + for s in add_lines + if s.replace("\\t", "\t") + "\n" not in lines + ) + ) - self.__call(args, "driver ({name}) install", - {"name": self.attrs["name"]}) + try: + dlt, dltp = mkstemp( + suffix=".devlink.tab", dir=image.get_root() + "/etc" + ) + dlt = os.fdopen(dlt, "w") + dlt.writelines(lines) + dlt.close() + os.chmod(dltp, st.st_mode) + os.chown(dltp, st.st_uid, st.st_gid) + os.rename(dltp, dlp) + except EnvironmentError as e: + e.args += ("writing",) + raise + + if missing_entries: + raise RuntimeError(missing_entries) + + if "devlink" in orig.attrs or "devlink" in self.attrs: + try: + update_devlinks() + except IOError as e: + print( + "{0} ({1}) upgrade (devlinks modification) " + "failed {2} etc/devlink.tab with error: " + "{3} ({4})".format( + self.name, + self.attrs["name"], + e.args[1], + e.args[0], + e.args[2], + ) + ) + except RuntimeError as e: + print( + "{0} ({1}) upgrade (devlinks modification) " + "failed modifying\netc/devlink.tab. 
The " + "following entries were to be removed, " + "but were\nnot found:\n ".format( + self.name, self.attrs["name"] + ) + + "\n ".join(e.args[0]) + ) - for cp in self.attrlist("clone_perms"): - args = ( - self.update_drv, "-b", image.get_root(), "-a", - "-m", cp, "clone" - ) - self.__call(args, "driver ({name}) clone permission " - "update", {"name": self.attrs["name"]}) - - if "devlink" in self.attrs: - dlp = os.path.normpath(os.path.join( - image.get_root(), "etc/devlink.tab")) - dlf = open(dlp) - dllines = dlf.readlines() - dlf.close() - st = os.stat(dlp) - - dlt, dltp = mkstemp(suffix=".devlink.tab", - dir=image.get_root() + "/etc") - dlt = os.fdopen(dlt, "w") - dlt.writelines(dllines) - dlt.writelines(( - s.replace("\\t", "\t") + "\n" - for s in self.attrlist("devlink") - if s.replace("\\t", "\t") + "\n" not in dllines - )) - dlt.close() - os.chmod(dltp, st.st_mode) - os.chown(dltp, st.st_uid, st.st_gid) - os.rename(dltp, dlp) - - def __update_install(self, image, orig): - add_base = ( self.update_drv, "-b", image.get_root(), "-a" ) - rem_base = ( self.update_drv, "-b", image.get_root(), "-d" ) - - add_alias = set(self.attrlist("alias")) - \ - set(orig.attrlist("alias")) - - nclass = set(self.attrlist("class")) - oclass = set(orig.attrlist("class")) - add_class = nclass - oclass - rem_class = oclass - nclass - - nperms = set(self.attrlist("perms")) - operms = set(orig.attrlist("perms")) - add_perms = nperms - operms - rem_perms = operms - nperms - - nprivs = set(self.attrlist("privs")) - oprivs = set(orig.attrlist("privs")) - add_privs = nprivs - oprivs - rem_privs = oprivs - nprivs - - npolicy = set(self.attrlist("policy")) - opolicy = set(orig.attrlist("policy")) - add_policy = npolicy - opolicy - rem_policy = opolicy - npolicy - - nclone = set(self.attrlist("clone_perms")) - oclone = set(orig.attrlist("clone_perms")) - add_clone = nclone - oclone - rem_clone = oclone - nclone - - for i in add_alias: - args = add_base + ("-i", '{0}'.format(i), - self.attrs["name"]) - self.__call(args, "driver ({name}) upgrade (addition " - "of alias '{alias}')", - {"name": self.attrs["name"], "alias": i}) - - # Removing aliases has already been taken care of in - # imageplan.execute by calling remove_aliases. - - # update_drv doesn't do anything with classes, so we have to - # futz with driver_classes by hand. - def update_classes(add_class, rem_class): - dcp = os.path.normpath(os.path.join( - image.get_root(), "etc/driver_classes")) - - try: - dcf = open(dcp, "r") - lines = dcf.readlines() - dcf.close() - except IOError as e: - e.args += ("reading",) - raise - - for i, l in enumerate(lines): - arr = l.split() - if len(arr) == 2 and \ - arr[0] == self.attrs["name"] and \ - arr[1] in rem_class: - del lines[i] - - for i in add_class: - lines += ["{0}\t{1}\n".format( - self.attrs["name"], i)] - - try: - dcf = open(dcp, "w") - dcf.writelines(lines) - dcf.close() - except IOError as e: - e.args += ("writing",) - raise - - if add_class or rem_class: - try: - update_classes(add_class, rem_class) - except IOError as e: - print("{0} ({1}) upgrade (classes modification) " - "failed {2} etc/driver_classes with error: " - "{3} ({4})".format(self.name, - self.attrs["name"], e.args[1], - e.args[0], e.args[2])) - print("tried to add {0} and remove {1}".format( - add_class, rem_class)) - - # We have to update devlink.tab by hand, too. 
- def update_devlinks(): - dlp = os.path.normpath(os.path.join( - image.get_root(), "etc/devlink.tab")) - - try: - dlf = open(dlp) - lines = dlf.readlines() - dlf.close() - st = os.stat(dlp) - except IOError as e: - e.args += ("reading",) - raise - - olines = set(orig.attrlist("devlink")) - nlines = set(self.attrlist("devlink")) - add_lines = nlines - olines - rem_lines = olines - nlines - - missing_entries = [] - for line in rem_lines: - try: - lineno = lines.index(line.replace("\\t", "\t") + "\n") - except ValueError: - missing_entries.append(line.replace("\\t", "\t")) - continue - del lines[lineno] - - # Don't put in duplicates. Because there's no way to - # tell what driver owns what line, this means that if - # two drivers try to own the same line, one of them will - # be unhappy if the other is uninstalled. So don't do - # that. - lines.extend(( - s.replace("\\t", "\t") + "\n" - for s in add_lines - if s.replace("\\t", "\t") + "\n" not in lines - )) - - try: - dlt, dltp = mkstemp(suffix=".devlink.tab", - dir=image.get_root() + "/etc") - dlt = os.fdopen(dlt, "w") - dlt.writelines(lines) - dlt.close() - os.chmod(dltp, st.st_mode) - os.chown(dltp, st.st_uid, st.st_gid) - os.rename(dltp, dlp) - except EnvironmentError as e: - e.args += ("writing",) - raise - - if missing_entries: - raise RuntimeError(missing_entries) - - if "devlink" in orig.attrs or "devlink" in self.attrs: - try: - update_devlinks() - except IOError as e: - print("{0} ({1}) upgrade (devlinks modification) " - "failed {2} etc/devlink.tab with error: " - "{3} ({4})".format(self.name, - self.attrs["name"], e.args[1], - e.args[0], e.args[2])) - except RuntimeError as e: - print("{0} ({1}) upgrade (devlinks modification) " - "failed modifying\netc/devlink.tab. The " - "following entries were to be removed, " - "but were\nnot found:\n ".format( - self.name, self.attrs["name"]) + - "\n ".join(e.args[0])) - - # For perms, we do removes first because of a busted starting - # point in build 79, where smbsrv has perms of both "* 666" and - # "* 640". The actions move us from 666 to 640, but if we add - # first, the 640 overwrites the 666 in the file, and then the - # deletion of 666 complains and fails. - # - # We can get around it by removing the 666 first, and then - # adding the 640, which overwrites the existing 640. - # - # XXX Need to think if there are any cases where this might be - # the wrong order, and whether the other attributes should be - # done in this order, too. - for i in rem_perms: - args = rem_base + ("-m", i, self.attrs["name"]) - self.__call(args, "driver ({name}) upgrade (removal " - "of minor perm '{perm}')", - {"name": self.attrs["name"], "perm": i}) - - for i in add_perms: - args = add_base + ("-m", i, self.attrs["name"]) - self.__call(args, "driver ({name}) upgrade (addition " - "of minor perm '{perm}')", - {"name": self.attrs["name"], "perm": i}) - - for i in add_privs: - args = add_base + ("-P", i, self.attrs["name"]) - self.__call(args, "driver ({name}) upgrade (addition " - "of privilege '{priv}')", - {"name": self.attrs["name"], "priv": i}) - - for i in rem_privs: - args = rem_base + ("-P", i, self.attrs["name"]) - self.__call(args, "driver ({name}) upgrade (removal " - "of privilege '{priv}')", - {"name": self.attrs["name"], "priv": i}) - - # We remove policies before adding them, since removing a policy - # for a driver/minor combination removes it completely from the - # policy file, not just the subset you might have specified. 
- # - # Also, when removing a policy, there is no way to convince - # update_drv to remove it unless there's a minor node associated - # with it. - for i in rem_policy: - spec = i.split() - # Test if there is a minor node and if so use it - # for the policy removal. Otherwise, if none is - # supplied, then use the wild card to match. - if len(spec) == 3: - minornode = spec[0] - elif len(spec) == 2: - # This can happen when the policy is defined - # in the package manifest without an associated - # minor node. - minornode = "*" - else: - print("driver ({0}) update (removal of " - "policy '{1}') failed: invalid policy " - "spec.".format(self.attrs["name"], i)) - continue - - args = rem_base + ("-p", minornode, self.attrs["name"]) - self.__call(args, "driver ({name}) upgrade (removal " - "of policy '{policy}')", - {"name": self.attrs["name"], "policy": i}) - - for i in add_policy: - args = add_base + ("-p", i, self.attrs["name"]) - self.__call(args, "driver ({name}) upgrade (addition " - "of policy '{policy}')", - {"name": self.attrs["name"], "policy": i}) - - for i in rem_clone: - args = rem_base + ("-m", i, "clone") - self.__call(args, "driver ({name}) upgrade (removal " - "of clone permission '{perm}')", - {"name": self.attrs["name"], "perm": i}) - - for i in add_clone: - args = add_base + ("-m", i, "clone") - self.__call(args, "driver ({name}) upgrade (addition " - "of clone permission '{perm}')", - {"name": self.attrs["name"], "perm": i}) - - @staticmethod - def __gen_read_binding_file(img, path, minfields=None, maxfields=None, - raw=False): - - myfile = open(os.path.normpath(os.path.join( - img.get_root(), path))) - for line in myfile: - line = line.strip() - fields = line.split() - result_fields = [] - for field in fields: - # This is a compromise, for now. In fact, - # comments can begin anywhere in the line, - # except inside of quoted material. - if field[0] == "#": - break - field = field.strip('"') - result_fields.append(field) - - if minfields is not None: - if len(result_fields) < minfields: - if raw: - yield line - continue - - if maxfields is not None: - if len(result_fields) > maxfields: - if raw: - yield line - continue - - if result_fields: - yield result_fields - elif raw: - yield line - myfile.close() - - - @classmethod - def __get_image_data(cls, img, name, collect_errs = False): - """Construct a driver action from image information. - - Setting 'collect_errs' to True will collect all caught - exceptions and return them in a tuple with the action. 
- """ - - errors = [] - - # See if it's installed - found_major = 0 - try: - for fields in DriverAction.__gen_read_binding_file(img, - "etc/name_to_major", minfields=2, maxfields=2): - if fields[0] == name: - found_major += 1 - except IOError as e: - e.args += ("etc/name_to_major",) - if collect_errs: - errors.append(e) - else: - raise - - if found_major == 0: - if collect_errs: - return None, [] - else: - return None - - if found_major > 1: - try: - raise RuntimeError( - "More than one entry for driver '{0}' in " - "/etc/name_to_major".format(name)) - except RuntimeError as e: - if collect_errs: - errors.append(e) - else: - raise - - act = cls(name=name) - - # Grab aliases - try: - act.attrs["alias"] = [] - for fields in DriverAction.__gen_read_binding_file(img, - "etc/driver_aliases", minfields=2, maxfields=2): - if fields[0] == name: - act.attrs["alias"].append(fields[1]) - except IOError as e: - e.args += ("etc/driver_aliases",) - if collect_errs: - errors.append(e) - else: - raise - - # Grab classes - try: - act.attrs["class"] = [] - for fields in DriverAction.__gen_read_binding_file(img, - "etc/driver_classes", minfields=2, maxfields=2): - if fields[0] == name: - act.attrs["class"].append(fields[1]) - except IOError as e: - e.args += ("etc/driver_classes",) - if collect_errs: - errors.append(e) - else: - raise - - # Grab minor node permissions. Note that the clone driver - # action doesn't actually own its minor node perms; those are - # owned by other driver actions, through their clone_perms - # attributes. - try: - act.attrs["perms"] = [] - act.attrs["clone_perms"] = [] - for fields in DriverAction.__gen_read_binding_file(img, - "etc/minor_perm", minfields=4, maxfields=4): - # Break first field into pieces. - namefields = fields[0].split(":") - if len(namefields) != 2: - continue - major = namefields[0] - minor = namefields[1] - if major == name and name != "clone": - act.attrs["perms"].append( - minor + " " + " ".join(fields[1:])) - elif major == "clone" and minor == name: - act.attrs["clone_perms"].append( - minor + " " + " ".join(fields[1:])) - except IOError as e: - e.args += ("etc/minor_perm",) - if collect_errs: - errors.append(e) - else: - raise - - # Grab device policy - try: - dpf = open(os.path.normpath(os.path.join( - img.get_root(), "etc/security/device_policy"))) - except IOError as e: - e.args += ("etc/security/device_policy",) - if collect_errs: - errors.append(e) - else: - raise + # For perms, we do removes first because of a busted starting + # point in build 79, where smbsrv has perms of both "* 666" and + # "* 640". The actions move us from 666 to 640, but if we add + # first, the 640 overwrites the 666 in the file, and then the + # deletion of 666 complains and fails. + # + # We can get around it by removing the 666 first, and then + # adding the 640, which overwrites the existing 640. + # + # XXX Need to think if there are any cases where this might be + # the wrong order, and whether the other attributes should be + # done in this order, too. 
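
The upgrade path computes set differences between the old and new attribute lists and then, for minor-node permissions, applies the removals before the additions for the reason given in the comment above. A minimal sketch of that bookkeeping, with perm_changes as a hypothetical helper name:

def perm_changes(old_perms, new_perms):
    old, new = set(old_perms), set(new_perms)
    rem_perms = old - new       # entries to drop first
    add_perms = new - old       # entries to add afterwards
    return rem_perms, add_perms

# e.g. perm_changes(["* 666", "* 640"], ["* 640"])
#      -> ({"* 666"}, set())
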
+ for i in rem_perms: + args = rem_base + ("-m", i, self.attrs["name"]) + self.__call( + args, + "driver ({name}) upgrade (removal " "of minor perm '{perm}')", + {"name": self.attrs["name"], "perm": i}, + ) + + for i in add_perms: + args = add_base + ("-m", i, self.attrs["name"]) + self.__call( + args, + "driver ({name}) upgrade (addition " "of minor perm '{perm}')", + {"name": self.attrs["name"], "perm": i}, + ) + + for i in add_privs: + args = add_base + ("-P", i, self.attrs["name"]) + self.__call( + args, + "driver ({name}) upgrade (addition " "of privilege '{priv}')", + {"name": self.attrs["name"], "priv": i}, + ) + + for i in rem_privs: + args = rem_base + ("-P", i, self.attrs["name"]) + self.__call( + args, + "driver ({name}) upgrade (removal " "of privilege '{priv}')", + {"name": self.attrs["name"], "priv": i}, + ) + + # We remove policies before adding them, since removing a policy + # for a driver/minor combination removes it completely from the + # policy file, not just the subset you might have specified. + # + # Also, when removing a policy, there is no way to convince + # update_drv to remove it unless there's a minor node associated + # with it. + for i in rem_policy: + spec = i.split() + # Test if there is a minor node and if so use it + # for the policy removal. Otherwise, if none is + # supplied, then use the wild card to match. + if len(spec) == 3: + minornode = spec[0] + elif len(spec) == 2: + # This can happen when the policy is defined + # in the package manifest without an associated + # minor node. + minornode = "*" + else: + print( + "driver ({0}) update (removal of " + "policy '{1}') failed: invalid policy " + "spec.".format(self.attrs["name"], i) + ) + continue + + args = rem_base + ("-p", minornode, self.attrs["name"]) + self.__call( + args, + "driver ({name}) upgrade (removal " "of policy '{policy}')", + {"name": self.attrs["name"], "policy": i}, + ) + + for i in add_policy: + args = add_base + ("-p", i, self.attrs["name"]) + self.__call( + args, + "driver ({name}) upgrade (addition " "of policy '{policy}')", + {"name": self.attrs["name"], "policy": i}, + ) + + for i in rem_clone: + args = rem_base + ("-m", i, "clone") + self.__call( + args, + "driver ({name}) upgrade (removal " + "of clone permission '{perm}')", + {"name": self.attrs["name"], "perm": i}, + ) + + for i in add_clone: + args = add_base + ("-m", i, "clone") + self.__call( + args, + "driver ({name}) upgrade (addition " + "of clone permission '{perm}')", + {"name": self.attrs["name"], "perm": i}, + ) + + @staticmethod + def __gen_read_binding_file( + img, path, minfields=None, maxfields=None, raw=False + ): + myfile = open(os.path.normpath(os.path.join(img.get_root(), path))) + for line in myfile: + line = line.strip() + fields = line.split() + result_fields = [] + for field in fields: + # This is a compromise, for now. In fact, + # comments can begin anywhere in the line, + # except inside of quoted material. + if field[0] == "#": + break + field = field.strip('"') + result_fields.append(field) + + if minfields is not None: + if len(result_fields) < minfields: + if raw: + yield line + continue + + if maxfields is not None: + if len(result_fields) > maxfields: + if raw: + yield line + continue + + if result_fields: + yield result_fields + elif raw: + yield line + myfile.close() + + @classmethod + def __get_image_data(cls, img, name, collect_errs=False): + """Construct a driver action from image information. 
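
The __gen_read_binding_file generator above tokenises files such as etc/name_to_major: a "#" starts a comment, double quotes around a field are stripped, and lines with too few or too many fields are skipped (or yielded verbatim in raw mode). A simplified sketch over an already-read list of lines, without the raw mode:

def parse_binding_lines(lines, minfields=None, maxfields=None):
    for line in lines:
        fields = []
        for field in line.strip().split():
            if field.startswith("#"):
                break                       # rest of the line is a comment
            fields.append(field.strip('"'))
        if minfields is not None and len(fields) < minfields:
            continue
        if maxfields is not None and len(fields) > maxfields:
            continue
        if fields:
            yield fields

# e.g. list(parse_binding_lines(["sd 32\n", "# comment\n"], 2, 2))
#      -> [["sd", "32"]]
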
+ + Setting 'collect_errs' to True will collect all caught + exceptions and return them in a tuple with the action. + """ + + errors = [] + + # See if it's installed + found_major = 0 + try: + for fields in DriverAction.__gen_read_binding_file( + img, "etc/name_to_major", minfields=2, maxfields=2 + ): + if fields[0] == name: + found_major += 1 + except IOError as e: + e.args += ("etc/name_to_major",) + if collect_errs: + errors.append(e) + else: + raise + + if found_major == 0: + if collect_errs: + return None, [] + else: + return None + + if found_major > 1: + try: + raise RuntimeError( + "More than one entry for driver '{0}' in " + "/etc/name_to_major".format(name) + ) + except RuntimeError as e: + if collect_errs: + errors.append(e) else: - act.attrs["policy"] = [ ] - for line in dpf: - line = line.strip() - if line.startswith("#"): - continue - fields = line.split() - if len(fields) < 2: - continue - n = "" - try: - n, c = fields[0].split(":", 1) - fields[0] = c - except ValueError: - # If there is no minor node - # specificition then set it to the - # wildcard but saving the driver - # name first. - n = fields[0] - fields[0] = "*" - except IndexError: - pass - - if n == name: - act.attrs["policy"].append( - " ".join(fields) - ) - dpf.close() - - # Grab device privileges + raise + + act = cls(name=name) + + # Grab aliases + try: + act.attrs["alias"] = [] + for fields in DriverAction.__gen_read_binding_file( + img, "etc/driver_aliases", minfields=2, maxfields=2 + ): + if fields[0] == name: + act.attrs["alias"].append(fields[1]) + except IOError as e: + e.args += ("etc/driver_aliases",) + if collect_errs: + errors.append(e) + else: + raise + + # Grab classes + try: + act.attrs["class"] = [] + for fields in DriverAction.__gen_read_binding_file( + img, "etc/driver_classes", minfields=2, maxfields=2 + ): + if fields[0] == name: + act.attrs["class"].append(fields[1]) + except IOError as e: + e.args += ("etc/driver_classes",) + if collect_errs: + errors.append(e) + else: + raise + + # Grab minor node permissions. Note that the clone driver + # action doesn't actually own its minor node perms; those are + # owned by other driver actions, through their clone_perms + # attributes. + try: + act.attrs["perms"] = [] + act.attrs["clone_perms"] = [] + for fields in DriverAction.__gen_read_binding_file( + img, "etc/minor_perm", minfields=4, maxfields=4 + ): + # Break first field into pieces. 
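
Each etc/minor_perm line has the form "driver:minor mode owner group". In the parsing here, entries whose major name matches the driver become its "perms", while entries under the "clone" major whose minor name matches the driver become its "clone_perms"; the clone driver does not own those itself. A hypothetical stand-alone classifier for a single already-split line:

def classify_minor_perm(fields, driver):
    major, _, minor = fields[0].partition(":")
    rest = " ".join(fields[1:])
    if major == driver and driver != "clone":
        return "perms", minor + " " + rest
    if major == "clone" and minor == driver:
        return "clone_perms", minor + " " + rest
    return None, None

# e.g. classify_minor_perm(["clone:audio", "0666", "root", "sys"], "audio")
#      -> ("clone_perms", "audio 0666 root sys")
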
+ namefields = fields[0].split(":") + if len(namefields) != 2: + continue + major = namefields[0] + minor = namefields[1] + if major == name and name != "clone": + act.attrs["perms"].append( + minor + " " + " ".join(fields[1:]) + ) + elif major == "clone" and minor == name: + act.attrs["clone_perms"].append( + minor + " " + " ".join(fields[1:]) + ) + except IOError as e: + e.args += ("etc/minor_perm",) + if collect_errs: + errors.append(e) + else: + raise + + # Grab device policy + try: + dpf = open( + os.path.normpath( + os.path.join(img.get_root(), "etc/security/device_policy") + ) + ) + except IOError as e: + e.args += ("etc/security/device_policy",) + if collect_errs: + errors.append(e) + else: + raise + else: + act.attrs["policy"] = [] + for line in dpf: + line = line.strip() + if line.startswith("#"): + continue + fields = line.split() + if len(fields) < 2: + continue + n = "" try: - dpf = open(os.path.normpath(os.path.join( - img.get_root(), "etc/security/extra_privs"))) - except IOError as e: - e.args += ("etc/security/extra_privs",) - if collect_errs: - errors.append(e) - else: - raise - else: - act.attrs["privs"] = [ ] - for line in dpf: - line = line.strip() - if line.startswith("#"): - continue - fields = line.split(":", 1) - if len(fields) != 2: - continue - if fields[0] == name: - act.attrs["privs"].append(fields[1]) - dpf.close() + n, c = fields[0].split(":", 1) + fields[0] = c + except ValueError: + # If there is no minor node + # specificition then set it to the + # wildcard but saving the driver + # name first. + n = fields[0] + fields[0] = "*" + except IndexError: + pass + + if n == name: + act.attrs["policy"].append(" ".join(fields)) + dpf.close() + + # Grab device privileges + try: + dpf = open( + os.path.normpath( + os.path.join(img.get_root(), "etc/security/extra_privs") + ) + ) + except IOError as e: + e.args += ("etc/security/extra_privs",) + if collect_errs: + errors.append(e) + else: + raise + else: + act.attrs["privs"] = [] + for line in dpf: + line = line.strip() + if line.startswith("#"): + continue + fields = line.split(":", 1) + if len(fields) != 2: + continue + if fields[0] == name: + act.attrs["privs"].append(fields[1]) + dpf.close() + + if collect_errs: + return act, errors + else: + return act + + def verify(self, img, **args): + """Returns a tuple of lists of the form (errors, warnings, + info). 
The error list will be empty if the action has been + correctly installed in the given image.""" + + errors = [] + warnings = [] + info = [] + if img.is_zone(): + return errors, warnings, info + + name = self.attrs["name"] + + onfs, errors = self.__get_image_data(img, name, collect_errs=True) + + for i, err in enumerate(errors): + if isinstance(err, IOError): + errors[i] = "{0}: {1}".format(err.args[2], err) + elif isinstance(err, RuntimeError): + errors[i] = _( + "etc/name_to_major: more than " + "one entry for '{0}' is present" + ).format(name) + + if not onfs: + errors[0:0] = [ + _("etc/name_to_major: '{0}' entry not present").format(name) + ] + return errors, warnings, info + + onfs_aliases = set(onfs.attrlist("alias")) + mfst_aliases = set(self.attrlist("alias")) + for a in onfs_aliases - mfst_aliases: + warnings.append( + _("extra alias '{0}' found in " "etc/driver_aliases").format(a) + ) + for a in mfst_aliases - onfs_aliases: + errors.append( + _("alias '{0}' missing from " "etc/driver_aliases").format(a) + ) + + onfs_classes = set(onfs.attrlist("class")) + mfst_classes = set(self.attrlist("class")) + for a in onfs_classes - mfst_classes: + warnings.append( + _("extra class '{0}' found in " "etc/driver_classes").format(a) + ) + for a in mfst_classes - onfs_classes: + errors.append( + _("class '{0}' missing from " "etc/driver_classes").format(a) + ) + + onfs_perms = set(onfs.attrlist("perms")) + mfst_perms = set(self.attrlist("perms")) + for a in onfs_perms - mfst_perms: + warnings.append( + _( + "extra minor node permission '{0}' " + "found in etc/minor_perm" + ).format(a) + ) + for a in mfst_perms - onfs_perms: + errors.append( + _( + "minor node permission '{0}' missing " "from etc/minor_perm" + ).format(a) + ) + + # Canonicalize "*" minorspecs to empty + policylist = list(onfs.attrlist("policy")) + for i, p in enumerate(policylist): + f = p.split() + if f[0] == "*": + policylist[i] = " ".join(f[1:]) + onfs_policy = set(policylist) + + policylist = self.attrlist("policy") + for i, p in enumerate(policylist): + f = p.split() + if f[0] == "*": + policylist[i] = " ".join(f[1:]) + mfst_policy = set(policylist) + for a in onfs_policy - mfst_policy: + warnings.append( + _( + "extra device policy '{0}' found in " + "etc/security/device_policy" + ).format(a) + ) + for a in mfst_policy - onfs_policy: + errors.append( + _( + "device policy '{0}' missing from " + "etc/security/device_policy" + ).format(a) + ) + + onfs_privs = set(onfs.attrlist("privs")) + mfst_privs = set(self.attrlist("privs")) + for a in onfs_privs - mfst_privs: + warnings.append( + _( + "extra device privilege '{0}' found " + "in etc/security/extra_privs" + ).format(a) + ) + for a in mfst_privs - onfs_privs: + errors.append( + _( + "device privilege '{0}' missing from " + "etc/security/extra_privs" + ).format(a) + ) + + return errors, warnings, info + + def remove(self, pkgplan): + image = pkgplan.image + + if image.is_zone(): + return + + args = (self.rem_drv, "-b", image.get_root(), self.attrs["name"]) + + self.__call( + args, "driver ({name}) removal", {"name": self.attrs["name"]} + ) + + for cp in self.attrlist("clone_perms"): + args = ( + self.update_drv, + "-b", + image.get_root(), + "-d", + "-m", + cp, + "clone", + ) + self.__call( + args, + "driver ({name}) clone permission " "update", + {"name": self.attrs["name"]}, + ) + + if "devlink" in self.attrs: + dlp = os.path.normpath( + os.path.join(image.get_root(), "etc/devlink.tab") + ) + + try: + dlf = open(dlp) + lines = dlf.readlines() + dlf.close() + st = 
os.stat(dlp) + except IOError as e: + print( + "{0} ({1}) removal (devlinks modification) " + "failed reading etc/devlink.tab with error: " + "{2} ({3})".format( + self.name, self.attrs["name"], e.args[0], e.args[1] + ) + ) + return - if collect_errs: - return act, errors - else: - return act - - def verify(self, img, **args): - """Returns a tuple of lists of the form (errors, warnings, - info). The error list will be empty if the action has been - correctly installed in the given image.""" - - errors = [] - warnings = [] - info = [] - if img.is_zone(): - return errors, warnings, info - - name = self.attrs["name"] - - onfs, errors = \ - self.__get_image_data(img, name, collect_errs = True) - - for i, err in enumerate(errors): - if isinstance(err, IOError): - errors[i] = "{0}: {1}".format(err.args[2], err) - elif isinstance(err, RuntimeError): - errors[i] = _("etc/name_to_major: more than " - "one entry for '{0}' is present").format( - name) - - if not onfs: - errors[0:0] = [ - _("etc/name_to_major: '{0}' entry not present").format( - name) - ] - return errors, warnings, info - - onfs_aliases = set(onfs.attrlist("alias")) - mfst_aliases = set(self.attrlist("alias")) - for a in onfs_aliases - mfst_aliases: - warnings.append(_("extra alias '{0}' found in " - "etc/driver_aliases").format(a)) - for a in mfst_aliases - onfs_aliases: - errors.append(_("alias '{0}' missing from " - "etc/driver_aliases").format(a)) - - onfs_classes = set(onfs.attrlist("class")) - mfst_classes = set(self.attrlist("class")) - for a in onfs_classes - mfst_classes: - warnings.append(_("extra class '{0}' found in " - "etc/driver_classes").format(a)) - for a in mfst_classes - onfs_classes: - errors.append(_("class '{0}' missing from " - "etc/driver_classes").format(a)) - - onfs_perms = set(onfs.attrlist("perms")) - mfst_perms = set(self.attrlist("perms")) - for a in onfs_perms - mfst_perms: - warnings.append(_("extra minor node permission '{0}' " - "found in etc/minor_perm").format(a)) - for a in mfst_perms - onfs_perms: - errors.append(_("minor node permission '{0}' missing " - "from etc/minor_perm").format(a)) - - # Canonicalize "*" minorspecs to empty - policylist = list(onfs.attrlist("policy")) - for i, p in enumerate(policylist): - f = p.split() - if f[0] == "*": - policylist[i] = " ".join(f[1:]) - onfs_policy = set(policylist) - - policylist = self.attrlist("policy") - for i, p in enumerate(policylist): - f = p.split() - if f[0] == "*": - policylist[i] = " ".join(f[1:]) - mfst_policy = set(policylist) - for a in onfs_policy - mfst_policy: - warnings.append(_("extra device policy '{0}' found in " - "etc/security/device_policy").format(a)) - for a in mfst_policy - onfs_policy: - errors.append(_("device policy '{0}' missing from " - "etc/security/device_policy").format(a)) - - onfs_privs = set(onfs.attrlist("privs")) - mfst_privs = set(self.attrlist("privs")) - for a in onfs_privs - mfst_privs: - warnings.append(_("extra device privilege '{0}' found " - "in etc/security/extra_privs").format(a)) - for a in mfst_privs - onfs_privs: - errors.append(_("device privilege '{0}' missing from " - "etc/security/extra_privs").format(a)) - - return errors, warnings, info - - def remove(self, pkgplan): - image = pkgplan.image - - if image.is_zone(): - return - - args = ( - self.rem_drv, - "-b", - image.get_root(), - self.attrs["name"] + devlinks = self.attrlist("devlink") + + missing_entries = [] + for line in devlinks: + try: + lineno = lines.index(line.replace("\\t", "\t") + "\n") + except ValueError: + 
missing_entries.append(line.replace("\\t", "\t")) + continue + del lines[lineno] + + if missing_entries: + print( + "{0} ({1}) removal (devlinks modification) " + "failed modifying\netc/devlink.tab. The " + "following entries were to be removed, " + "but were\nnot found:\n ".format( + self.name, self.attrs["name"] + ) + + "\n ".join(missing_entries) ) - self.__call(args, "driver ({name}) removal", - {"name": self.attrs["name"]}) + try: + dlt, dltp = mkstemp( + suffix=".devlink.tab", dir=image.get_root() + "/etc" + ) + dlt = os.fdopen(dlt, "w") + dlt.writelines(lines) + dlt.close() + os.chmod(dltp, st.st_mode) + os.chown(dltp, st.st_uid, st.st_gid) + os.rename(dltp, dlp) + except EnvironmentError as e: + print( + "{0} ({1}) removal (devlinks modification) " + "failed writing etc/devlink.tab with error: " + "{2} ({3})".format( + self.name, self.attrs["name"], e.args[0], e.args[1] + ) + ) + + def generate_indices(self): + """Generates the indices needed by the search dictionary. See + generic.py for a more detailed explanation.""" + + ret = [] + if "name" in self.attrs: + ret.append(("driver", "driver_name", self.attrs["name"], None)) + if "alias" in self.attrs: + ret.append(("driver", "alias", self.attrs["alias"], None)) + return ret - for cp in self.attrlist("clone_perms"): - args = ( - self.update_drv, "-b", image.get_root(), - "-d", "-m", cp, "clone" - ) - self.__call(args, "driver ({name}) clone permission " - "update", {"name": self.attrs["name"]}) - - if "devlink" in self.attrs: - dlp = os.path.normpath(os.path.join( - image.get_root(), "etc/devlink.tab")) - - try: - dlf = open(dlp) - lines = dlf.readlines() - dlf.close() - st = os.stat(dlp) - except IOError as e: - print("{0} ({1}) removal (devlinks modification) " - "failed reading etc/devlink.tab with error: " - "{2} ({3})".format(self.name, - self.attrs["name"], e.args[0], e.args[1])) - return - - devlinks = self.attrlist("devlink") - - missing_entries = [] - for line in devlinks: - try: - lineno = lines.index(line.replace("\\t", "\t") + "\n") - except ValueError: - missing_entries.append(line.replace("\\t", "\t")) - continue - del lines[lineno] - - if missing_entries: - print("{0} ({1}) removal (devlinks modification) " - "failed modifying\netc/devlink.tab. The " - "following entries were to be removed, " - "but were\nnot found:\n ".format( - self.name, self.attrs["name"]) + - "\n ".join(missing_entries)) - - try: - dlt, dltp = mkstemp(suffix=".devlink.tab", - dir=image.get_root() + "/etc") - dlt = os.fdopen(dlt, "w") - dlt.writelines(lines) - dlt.close() - os.chmod(dltp, st.st_mode) - os.chown(dltp, st.st_uid, st.st_gid) - os.rename(dltp, dlp) - except EnvironmentError as e: - print("{0} ({1}) removal (devlinks modification) " - "failed writing etc/devlink.tab with error: " - "{2} ({3})".format(self.name, - self.attrs["name"], e.args[0], e.args[1])) - - def generate_indices(self): - """Generates the indices needed by the search dictionary. 
See - generic.py for a more detailed explanation.""" - - ret = [] - if "name" in self.attrs: - ret.append(("driver", "driver_name", self.attrs["name"], - None)) - if "alias" in self.attrs: - ret.append(("driver", "alias", self.attrs["alias"], - None)) - return ret # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/file.py b/src/modules/actions/file.py index 4640a9ccd..33d13cded 100644 --- a/src/modules/actions/file.py +++ b/src/modules/actions/file.py @@ -52,915 +52,994 @@ from pkg.client.debugvalues import DebugValues try: - import pkg.elf as elf - haveelf = True -except ImportError: - haveelf = False - -class FileAction(generic.Action): - """Class representing a file-type packaging object.""" - - __slots__ = ["hash", "replace_required"] - - name = "file" - key_attr = "path" - unique_attrs = "path", "mode", "owner", "group", "preserve", "sysattr" - globally_identical = True - namespace_group = "path" - ordinality = generic._orderdict[name] - - has_payload = True - - # __init__ is provided as a native function (see end of class - # declaration). - - # this check is only needed on Windows - if portable.ostype == "windows": - def preinstall(self, pkgplan, orig): - """If the file exists, check if it is in use.""" - if not orig: - return - path = orig.get_installed_path(pkgplan.image.get_root()) - if os.path.isfile(path) and self.in_use(path): - raise api_errors.FileInUseException(path) - - def preremove(self, pkgplan): - path = self.get_installed_path(pkgplan.image.get_root()) - if os.path.isfile(path) and self.in_use(path): - raise api_errors.FileInUseException(path) - - def in_use(self, path): - """Determine if a file is in use (locked) by trying - to rename the file to itself.""" - try: - os.rename(path, path) - except OSError as err: - if err.errno != errno.EACCES: - raise - return True - return False - - def __set_data(self, pkgplan): - """Private helper function to set the data field of the - action.""" - - hash_attr, hash_attr_val, hash_func = \ - digest.get_least_preferred_hash(self) - - retrieved = pkgplan.image.imageplan._retrieved - retrieved.add(self.get_installed_path( - pkgplan.image.get_root())) - if len(retrieved) > 50 or \ - DebugValues['max-plan-execute-retrievals'] == 1: - raise api_errors.PlanExecutionError(retrieved) - - # This is an unexpected file retrieval, so the retrieved file - # will be streamed directly from the source to the final - # destination and will not be stored in the image download - # cache. - try: - pub = pkgplan.image.get_publisher( - pkgplan.destination_fmri.publisher) - data = pkgplan.image.transport.get_datastream(pub, - hash_attr_val) - return lambda: data - finally: - pkgplan.image.cleanup_downloads() + import pkg.elf as elf + haveelf = True +except ImportError: + haveelf = False - def install(self, pkgplan, orig): - """Client-side method that installs a file.""" - mode = None - try: - mode = int(self.attrs.get("mode", None), 8) - except (TypeError, ValueError): - # Mode isn't valid, so let validate raise a more - # informative error. - self.validate(fmri=pkgplan.destination_fmri) - - owner, group = self.get_fsobj_uid_gid(pkgplan, - pkgplan.destination_fmri) - - final_path = self.get_installed_path(pkgplan.image.get_root()) - - # Don't allow installation through symlinks. 
- self.fsobj_checkpath(pkgplan, final_path) - - if not os.path.exists(os.path.dirname(final_path)): - self.makedirs(os.path.dirname(final_path), - mode=misc.PKG_DIR_MODE, - fmri=pkgplan.destination_fmri) - elif (not orig and not pkgplan.origin_fmri and - "preserve" in self.attrs and - self.attrs["preserve"] not in ("abandon", - "install-only") and os.path.isfile(final_path)): - # Unpackaged editable file is already present during - # initial install; salvage it before continuing. - pkgplan.salvage(final_path) - - # XXX If we're upgrading, do we need to preserve file perms from - # existing file? - - # check if we have a save_file active; if so, simulate file - # being already present rather than installed from scratch - - if "save_file" in self.attrs: - orig = self.restore_file(pkgplan.image) - - # See if we need to preserve the file, and if so, set that up. +class FileAction(generic.Action): + """Class representing a file-type packaging object.""" + + __slots__ = ["hash", "replace_required"] + + name = "file" + key_attr = "path" + unique_attrs = "path", "mode", "owner", "group", "preserve", "sysattr" + globally_identical = True + namespace_group = "path" + ordinality = generic._orderdict[name] + + has_payload = True + + # __init__ is provided as a native function (see end of class + # declaration). + + # this check is only needed on Windows + if portable.ostype == "windows": + + def preinstall(self, pkgplan, orig): + """If the file exists, check if it is in use.""" + if not orig: + return + path = orig.get_installed_path(pkgplan.image.get_root()) + if os.path.isfile(path) and self.in_use(path): + raise api_errors.FileInUseException(path) + + def preremove(self, pkgplan): + path = self.get_installed_path(pkgplan.image.get_root()) + if os.path.isfile(path) and self.in_use(path): + raise api_errors.FileInUseException(path) + + def in_use(self, path): + """Determine if a file is in use (locked) by trying + to rename the file to itself.""" + try: + os.rename(path, path) + except OSError as err: + if err.errno != errno.EACCES: + raise + return True + return False + + def __set_data(self, pkgplan): + """Private helper function to set the data field of the + action.""" + + hash_attr, hash_attr_val, hash_func = digest.get_least_preferred_hash( + self + ) + + retrieved = pkgplan.image.imageplan._retrieved + retrieved.add(self.get_installed_path(pkgplan.image.get_root())) + if ( + len(retrieved) > 50 + or DebugValues["max-plan-execute-retrievals"] == 1 + ): + raise api_errors.PlanExecutionError(retrieved) + + # This is an unexpected file retrieval, so the retrieved file + # will be streamed directly from the source to the final + # destination and will not be stored in the image download + # cache. + try: + pub = pkgplan.image.get_publisher( + pkgplan.destination_fmri.publisher + ) + data = pkgplan.image.transport.get_datastream(pub, hash_attr_val) + return lambda: data + finally: + pkgplan.image.cleanup_downloads() + + def install(self, pkgplan, orig): + """Client-side method that installs a file.""" + + mode = None + try: + mode = int(self.attrs.get("mode", None), 8) + except (TypeError, ValueError): + # Mode isn't valid, so let validate raise a more + # informative error. + self.validate(fmri=pkgplan.destination_fmri) + + owner, group = self.get_fsobj_uid_gid(pkgplan, pkgplan.destination_fmri) + + final_path = self.get_installed_path(pkgplan.image.get_root()) + + # Don't allow installation through symlinks. 
+ self.fsobj_checkpath(pkgplan, final_path) + + if not os.path.exists(os.path.dirname(final_path)): + self.makedirs( + os.path.dirname(final_path), + mode=misc.PKG_DIR_MODE, + fmri=pkgplan.destination_fmri, + ) + elif ( + not orig + and not pkgplan.origin_fmri + and "preserve" in self.attrs + and self.attrs["preserve"] not in ("abandon", "install-only") + and os.path.isfile(final_path) + ): + # Unpackaged editable file is already present during + # initial install; salvage it before continuing. + pkgplan.salvage(final_path) + + # XXX If we're upgrading, do we need to preserve file perms from + # existing file? + + # check if we have a save_file active; if so, simulate file + # being already present rather than installed from scratch + + if "save_file" in self.attrs: + orig = self.restore_file(pkgplan.image) + + # See if we need to preserve the file, and if so, set that up. + # + # XXX What happens when we transition from preserve to + # non-preserve or vice versa? Do we want to treat a preserve + # attribute as turning the action into a critical action? + # + # XXX We should save the originally installed file. It can be + # used as an ancestor for a three-way merge, for example. Where + # should it be stored? + pres_type = self._check_preserve(orig, pkgplan) + do_content = True + old_path = None + if pres_type == True or ( + pres_type and pkgplan.origin_fmri == pkgplan.destination_fmri + ): + # File is marked to be preserved and exists so don't + # reinstall content. + do_content = False + elif pres_type == "legacy": + # Only rename old file if this is a transition to + # preserve=legacy from something else. + if orig.attrs.get("preserve", None) != "legacy": + old_path = final_path + ".legacy" + elif pres_type == "renameold.update": + old_path = final_path + ".update" + elif pres_type == "renameold": + old_path = final_path + ".old" + elif pres_type == "renamenew": + final_path = final_path + ".new" + elif pres_type == "abandon": + return + + # If it is a directory (and not empty) then we should + # salvage the contents. + if ( + os.path.exists(final_path) + and not os.path.islink(final_path) + and os.path.isdir(final_path) + ): + try: + os.rmdir(final_path) + except OSError as e: + if e.errno == errno.ENOENT: + pass + elif e.errno in (errno.EEXIST, errno.ENOTEMPTY): + pkgplan.salvage(final_path) + elif e.errno != errno.EACCES: + # this happens on Windows + raise + + # XXX This needs to be modularized. + if do_content and self.needsdata(orig, pkgplan): + tfilefd, temp = tempfile.mkstemp(dir=os.path.dirname(final_path)) + if not self.data: + # The state of the filesystem changed after the + # plan was prepared; attempt a one-off + # retrieval of the data. 
+ self.data = self.__set_data(pkgplan) + stream = self.data() + tfile = os.fdopen(tfilefd, "wb") + try: + # Always verify using the most preferred hash + hash_attr, hash_val, hash_func = digest.get_preferred_hash(self) + shasum = misc.gunzip_from_stream(stream, tfile, hash_func) + except zlib.error as e: + raise ActionExecutionError( + self, + details=_("Error decompressing payload: " "{0}").format( + " ".join([str(a) for a in e.args]) + ), + error=e, + ) + finally: + tfile.close() + stream.close() + + if shasum != hash_val: + raise ActionExecutionError( + self, + details=_( + "Action data hash verification " + "failure: expected: {expected} computed: " + "{actual} action: {action}" + ).format(expected=hash_val, actual=shasum, action=self), + ) + + else: + temp = final_path + + try: + os.chmod(temp, mode) + except OSError as e: + # If the file didn't exist, assume that's intentional, + # and drive on. + if e.errno != errno.ENOENT: + raise + else: + return + + try: + portable.chown(temp, owner, group) + except OSError as e: + if e.errno != errno.EPERM: + raise + + # XXX There's a window where final_path doesn't exist, but we + # probably don't care. + if do_content and old_path: + try: + portable.rename(final_path, old_path) + except OSError as e: + if e.errno != errno.ENOENT: + # Only care if file isn't gone already. + raise + + # This is safe even if temp == final_path. + try: + portable.rename(temp, final_path) + except OSError as e: + raise api_errors.FileInUseException(final_path) + + # Handle timestamp if specified (and content was installed). + if do_content and "timestamp" in self.attrs: + t = misc.timestamp_to_time(self.attrs["timestamp"]) + try: + os.utime(final_path, (t, t)) + except OSError as e: + if e.errno != errno.EACCES: + raise + + # On Windows, the time cannot be changed on a + # read-only file + os.chmod(final_path, stat.S_IRUSR | stat.S_IWUSR) + os.utime(final_path, (t, t)) + os.chmod(final_path, mode) + + # Handle system attributes. + sattr = self.attrs.get("sysattr") + if sattr: + if isinstance(sattr, list): + sattr = ",".join(sattr) + sattrs = sattr.split(",") + if ( + len(sattrs) == 1 + and sattrs[0] not in portable.get_sysattr_dict() + ): + # not a verbose attr, try as a compact attr seq + arg = sattrs[0] + else: + arg = sattrs + + try: + portable.fsetattr(final_path, arg) + except OSError as e: + if e.errno != errno.EINVAL: + raise + warn = _( + "System attributes are not supported " + "on the target image filesystem; 'sysattr'" + " ignored for {0}" + ).format(self.attrs["path"]) + pkgplan.image.imageplan.pd.add_item_message( + pkgplan.destination_fmri, + misc.time_to_timestamp(time.time()), + MSG_WARNING, + warn, + ) + except ValueError as e: + warn = _( + "Could not set system attributes for {path}" + "'{attrlist}': {err}" + ).format(attrlist=sattr, err=e, path=self.attrs["path"]) + pkgplan.image.imageplan.pd.add_item_message( + pkgplan.destination_fmri, + misc.time_to_timestamp(time.time()), + MSG_WARNING, + warn, + ) + + def verify(self, img, **args): + """Returns a tuple of lists of the form (errors, warnings, + info). The error list will be empty if the action has been + correctly installed in the given image. 
+ + In detail, this verifies that the file is present, and if + the preserve attribute is not present, that the hashes + and other attributes of the file match.""" + + if self.attrs.get("preserve") == "abandon": + return [], [], [] + + path = self.get_installed_path(img.get_root()) + + lstat, errors, warnings, info, abort = self.verify_fsobj_common( + img, stat.S_IFREG + ) + if lstat: + if not stat.S_ISREG(lstat.st_mode): + self.replace_required = True + + if abort: + assert errors + self.replace_required = True + return errors, warnings, info + + if path.lower().endswith("/bobcat") and args["verbose"] == True: + # Returned as a purely informational (untranslated) + # message so that no client should interpret it as a + # reason to fail verification. + info.append( + "Warning: package may contain bobcat! " + "(http://xkcd.com/325/)" + ) + + preserve = self.attrs.get("preserve") + + if ( + preserve is None + and "timestamp" in self.attrs + and lstat.st_mtime + != misc.timestamp_to_time(self.attrs["timestamp"]) + ): + errors.append( + _("Timestamp: {found} should be " "{expected}").format( + found=misc.time_to_timestamp(lstat.st_mtime), + expected=self.attrs["timestamp"], + ) + ) + + # avoid checking pkg.size if we have any content-hashes present; + # different size files may have the same content-hash + pkg_size = int(self.attrs.get("pkg.size", 0)) + if ( + preserve is None + and pkg_size > 0 + and not set(digest.DEFAULT_GELF_HASH_ATTRS).intersection( + set(self.attrs.keys()) + ) + and lstat.st_size != pkg_size + ): + errors.append( + _("Size: {found:d} bytes should be " "{expected:d}").format( + found=lstat.st_size, expected=pkg_size + ) + ) + + if preserve is not None and args["verbose"] == False or lstat is None: + return errors, warnings, info + + if args["forever"] != True: + return errors, warnings, info + + # + # Check file contents. + # + try: + # This is a generic mechanism, but only used for libc on + # x86, where the "best" version of libc is lofs-mounted + # on the canonical path, foiling the standard verify + # checks. + is_mtpt = self.attrs.get("mountpoint", "").lower() == "true" + elfhash = None + elferror = None + ( + elf_hash_attr, + elf_hash_val, + elf_hash_func, + ) = digest.get_preferred_hash(self, hash_type=pkg.digest.HASH_GELF) + if elf_hash_attr and haveelf and not is_mtpt: # - # XXX What happens when we transition from preserve to - # non-preserve or vice versa? Do we want to treat a preserve - # attribute as turning the action into a critical action? + # It's possible for the elf module to + # throw while computing the hash, + # especially if the file is badly + # corrupted or truncated. # - # XXX We should save the originally installed file. It can be - # used as an ancestor for a three-way merge, for example. Where - # should it be stored? - pres_type = self._check_preserve(orig, pkgplan) - do_content = True - old_path = None - if pres_type == True or (pres_type and - pkgplan.origin_fmri == pkgplan.destination_fmri): - # File is marked to be preserved and exists so don't - # reinstall content. - do_content = False - elif pres_type == "legacy": - # Only rename old file if this is a transition to - # preserve=legacy from something else. 
- if orig.attrs.get("preserve", None) != "legacy": - old_path = final_path + ".legacy" - elif pres_type == "renameold.update": - old_path = final_path + ".update" - elif pres_type == "renameold": - old_path = final_path + ".old" - elif pres_type == "renamenew": - final_path = final_path + ".new" - elif pres_type == "abandon": - return - - # If it is a directory (and not empty) then we should - # salvage the contents. - if os.path.exists(final_path) and \ - not os.path.islink(final_path) and \ - os.path.isdir(final_path): - try: - os.rmdir(final_path) - except OSError as e: - if e.errno == errno.ENOENT: - pass - elif e.errno in (errno.EEXIST, errno.ENOTEMPTY): - pkgplan.salvage(final_path) - elif e.errno != errno.EACCES: - # this happens on Windows - raise - - # XXX This needs to be modularized. - if do_content and self.needsdata(orig, pkgplan): - tfilefd, temp = tempfile.mkstemp(dir=os.path.dirname( - final_path)) - if not self.data: - # The state of the filesystem changed after the - # plan was prepared; attempt a one-off - # retrieval of the data. - self.data = self.__set_data(pkgplan) - stream = self.data() - tfile = os.fdopen(tfilefd, "wb") - try: - # Always verify using the most preferred hash - hash_attr, hash_val, hash_func = \ - digest.get_preferred_hash(self) - shasum = misc.gunzip_from_stream(stream, tfile, - hash_func) - except zlib.error as e: - raise ActionExecutionError(self, - details=_("Error decompressing payload: " - "{0}").format( - " ".join([str(a) for a in e.args])), - error=e) - finally: - tfile.close() - stream.close() - - if shasum != hash_val: - raise ActionExecutionError(self, - details=_("Action data hash verification " - "failure: expected: {expected} computed: " - "{actual} action: {action}").format( - expected=hash_val, - actual=shasum, - action=self - )) - - else: - temp = final_path - try: - os.chmod(temp, mode) - except OSError as e: - # If the file didn't exist, assume that's intentional, - # and drive on. - if e.errno != errno.ENOENT: - raise - else: - return - - try: - portable.chown(temp, owner, group) - except OSError as e: - if e.errno != errno.EPERM: - raise - - # XXX There's a window where final_path doesn't exist, but we - # probably don't care. - if do_content and old_path: - try: - portable.rename(final_path, old_path) - except OSError as e: - if e.errno != errno.ENOENT: - # Only care if file isn't gone already. - raise - - # This is safe even if temp == final_path. - try: - portable.rename(temp, final_path) - except OSError as e: - raise api_errors.FileInUseException(final_path) - - # Handle timestamp if specified (and content was installed). - if do_content and "timestamp" in self.attrs: - t = misc.timestamp_to_time(self.attrs["timestamp"]) - try: - os.utime(final_path, (t, t)) - except OSError as e: - if e.errno != errno.EACCES: - raise - - # On Windows, the time cannot be changed on a - # read-only file - os.chmod(final_path, stat.S_IRUSR|stat.S_IWUSR) - os.utime(final_path, (t, t)) - os.chmod(final_path, mode) - - # Handle system attributes. 
- sattr = self.attrs.get("sysattr") - if sattr: - if isinstance(sattr, list): - sattr = ",".join(sattr) - sattrs = sattr.split(",") - if len(sattrs) == 1 and \ - sattrs[0] not in portable.get_sysattr_dict(): - # not a verbose attr, try as a compact attr seq - arg = sattrs[0] - else: - arg = sattrs - - try: - portable.fsetattr(final_path, arg) - except OSError as e: - if e.errno != errno.EINVAL: - raise - warn = _("System attributes are not supported " - "on the target image filesystem; 'sysattr'" - " ignored for {0}").format( - self.attrs["path"]) - pkgplan.image.imageplan.pd.add_item_message( - pkgplan.destination_fmri, - misc.time_to_timestamp(time.time()), - MSG_WARNING, warn) - except ValueError as e: - warn = _("Could not set system attributes for {path}" - "'{attrlist}': {err}").format( - attrlist=sattr, - err=e, - path=self.attrs["path"] - ) - pkgplan.image.imageplan.pd.add_item_message( - pkgplan.destination_fmri, - misc.time_to_timestamp(time.time()), - MSG_WARNING, warn) - - def verify(self, img, **args): - """Returns a tuple of lists of the form (errors, warnings, - info). The error list will be empty if the action has been - correctly installed in the given image. - - In detail, this verifies that the file is present, and if - the preserve attribute is not present, that the hashes - and other attributes of the file match.""" - - if self.attrs.get("preserve") == "abandon": - return [], [], [] - - path = self.get_installed_path(img.get_root()) - - lstat, errors, warnings, info, abort = \ - self.verify_fsobj_common(img, stat.S_IFREG) - if lstat: - if not stat.S_ISREG(lstat.st_mode): - self.replace_required = True - - if abort: - assert errors + # On path, only calculate the + # content hash that matches + # the preferred one on the + # action + get_elfhash = elf_hash_attr == "elfhash" + get_sha256 = ( + not get_elfhash + and elf_hash_func + == digest.GELF_HASH_ALGS["gelf:sha256"] + ) + get_sha512t_256 = ( + not get_elfhash + and elf_hash_func + == digest.GELF_HASH_ALGS["gelf:sha512t_256"] + ) + elfhash = elf.get_hashes( + path, + elfhash=get_elfhash, + sha256=get_sha256, + sha512t_256=get_sha512t_256, + )[elf_hash_attr] + + if get_elfhash: + elfhash = [elfhash] + else: + elfhash = list(digest.ContentHash(elfhash).values()) + except elf.ElfError as e: + # Any ELF error means there is something bad + # with the file, mark as needing to be replaced. + elferror = _("ELF failure: {0}").format(e) + + if elfhash is not None and elf_hash_val != elfhash[0]: + elferror = _( + "ELF content hash: " "{found} " "should be {expected}" + ).format(found=elfhash[0], expected=elf_hash_val) + + # Always check on the file hash because the ELF hash + # check only checks on the ELF parts and does not + # check for some other file integrity issues. + if not is_mtpt: + hash_attr, hash_val, hash_func = digest.get_preferred_hash(self) + sha_hash, data = misc.get_data_digest(path, hash_func=hash_func) + if sha_hash != hash_val: + # Prefer the ELF content hash error message. 
+ if preserve is not None: + info.append(_("editable file has " "been changed")) + elif elferror: + errors.append(elferror) + self.replace_required = True + else: + errors.append( + _( + "Hash: " "{found} should be " "{expected}" + ).format(found=sha_hash, expected=hash_val) + ) self.replace_required = True - return errors, warnings, info - - if path.lower().endswith("/bobcat") and args["verbose"] == True: - # Returned as a purely informational (untranslated) - # message so that no client should interpret it as a - # reason to fail verification. - info.append("Warning: package may contain bobcat! " - "(http://xkcd.com/325/)") - - preserve = self.attrs.get("preserve") - - if (preserve is None and - "timestamp" in self.attrs and lstat.st_mtime != - misc.timestamp_to_time(self.attrs["timestamp"])): - errors.append(_("Timestamp: {found} should be " - "{expected}").format( - found=misc.time_to_timestamp(lstat.st_mtime), - expected=self.attrs["timestamp"])) - - # avoid checking pkg.size if we have any content-hashes present; - # different size files may have the same content-hash - pkg_size = int(self.attrs.get("pkg.size", 0)) - if preserve is None and pkg_size > 0 and \ - not set(digest.DEFAULT_GELF_HASH_ATTRS).intersection( - set(self.attrs.keys())) and \ - lstat.st_size != pkg_size: - errors.append(_("Size: {found:d} bytes should be " - "{expected:d}").format(found=lstat.st_size, - expected=pkg_size)) - - if (preserve is not None and args["verbose"] == False or - lstat is None): - return errors, warnings, info - - if args["forever"] != True: - return errors, warnings, info - - # - # Check file contents. - # - try: - # This is a generic mechanism, but only used for libc on - # x86, where the "best" version of libc is lofs-mounted - # on the canonical path, foiling the standard verify - # checks. - is_mtpt = self.attrs.get("mountpoint", "").lower() == "true" - elfhash = None - elferror = None - elf_hash_attr, elf_hash_val, \ - elf_hash_func = \ - digest.get_preferred_hash(self, - hash_type=pkg.digest.HASH_GELF) - if elf_hash_attr and haveelf and not is_mtpt: - # - # It's possible for the elf module to - # throw while computing the hash, - # especially if the file is badly - # corrupted or truncated. - # - try: - # On path, only calculate the - # content hash that matches - # the preferred one on the - # action - get_elfhash = \ - elf_hash_attr == "elfhash" - get_sha256 = (not get_elfhash and - elf_hash_func == - digest.GELF_HASH_ALGS["gelf:sha256"]) - get_sha512t_256 = (not get_elfhash and - elf_hash_func == - digest.GELF_HASH_ALGS["gelf:sha512t_256"]) - elfhash = elf.get_hashes( - path, elfhash=get_elfhash, - sha256=get_sha256, - sha512t_256=get_sha512t_256 - )[elf_hash_attr] - - if get_elfhash: - elfhash = [elfhash] - else: - elfhash = list(digest.ContentHash(elfhash).values()) - except elf.ElfError as e: - # Any ELF error means there is something bad - # with the file, mark as needing to be replaced. - elferror = _("ELF failure: {0}").format(e) - - if (elfhash is not None and - elf_hash_val != elfhash[0]): - elferror = _("ELF content hash: " - "{found} " - "should be {expected}").format( - found=elfhash[0], - expected=elf_hash_val) - - # Always check on the file hash because the ELF hash - # check only checks on the ELF parts and does not - # check for some other file integrity issues. 
- if not is_mtpt: - hash_attr, hash_val, hash_func = \ - digest.get_preferred_hash(self) - sha_hash, data = misc.get_data_digest(path, - hash_func=hash_func) - if sha_hash != hash_val: - # Prefer the ELF content hash error message. - if preserve is not None: - info.append(_( - "editable file has " - "been changed")) - elif elferror: - errors.append(elferror) - self.replace_required = True - else: - errors.append(_("Hash: " - "{found} should be " - "{expected}").format( - found=sha_hash, - expected=hash_val)) - self.replace_required = True - - # Check system attributes. - # Since some attributes like 'archive' or 'av_modified' - # are set automatically by the FS, it makes no sense to - # check for 1:1 matches. So we only check that the - # system attributes specified in the action are still - # set on the file. - sattr = self.attrs.get("sysattr", None) - if sattr: - if isinstance(sattr, list): - sattr = ",".join(sattr) - sattrs = sattr.split(",") - if len(sattrs) == 1 and \ - sattrs[0] not in portable.get_sysattr_dict(): - # not a verbose attr, try as a compact - set_attrs = portable.fgetattr(path, - compact=True) - sattrs = sattrs[0] - else: - set_attrs = portable.fgetattr(path) - - for a in sattrs: - if a not in set_attrs: - errors.append( - _("System attribute '{0}' " - "not set").format(a)) - - except EnvironmentError as e: - if e.errno == errno.EACCES: - errors.append(_("Skipping: Permission Denied")) - else: - errors.append(_("Unexpected Error: {0}").format( - e)) - except Exception as e: - errors.append(_("Unexpected Exception: {0}").format(e)) - - return errors, warnings, info - - def __check_preserve_version(self, orig): - """Any action that can have a 'preserve' attribute should also - be able to have a 'preserve-version' attribute (that - represents a simple package FMRI release version; no - timestamp, etc., just 'X.n.n...'). - - In the absence of a 'preserve-version' attribute, '0' will be - assumed. - - When performing downgrades, if the installed editable file has - been modified (compared to the proposed packaged version), the - behavior will be as follows: - - if the installed action's 'preserve-version' is greater than - the proposed 'preserve-version', the installed file will be - renamed with '.update' and the proposed file will be - installed. - - if the installed action's 'preserve-version' is equal to or - less than the proposed 'preserve-version', the installed file - content will not be modified.""" - - orig_preserve_ver = version.Version("0") - preserve_ver = version.Version("0") - - try: - ver = orig.attrs["preserve-version"] - orig_preserve_ver = version.Version(ver) - except KeyError: - pass - - try: - ver = self.attrs["preserve-version"] - preserve_ver = version.Version(ver) - except KeyError: - pass - - if orig_preserve_ver > preserve_ver: - # .old is intentionally avoided here to prevent - # accidental collisions with the normal install - # process. - return "renameold.update" + # Check system attributes. + # Since some attributes like 'archive' or 'av_modified' + # are set automatically by the FS, it makes no sense to + # check for 1:1 matches. So we only check that the + # system attributes specified in the action are still + # set on the file. 
+ sattr = self.attrs.get("sysattr", None) + if sattr: + if isinstance(sattr, list): + sattr = ",".join(sattr) + sattrs = sattr.split(",") + if ( + len(sattrs) == 1 + and sattrs[0] not in portable.get_sysattr_dict() + ): + # not a verbose attr, try as a compact + set_attrs = portable.fgetattr(path, compact=True) + sattrs = sattrs[0] + else: + set_attrs = portable.fgetattr(path) + + for a in sattrs: + if a not in set_attrs: + errors.append( + _("System attribute '{0}' " "not set").format(a) + ) + + except EnvironmentError as e: + if e.errno == errno.EACCES: + errors.append(_("Skipping: Permission Denied")) + else: + errors.append(_("Unexpected Error: {0}").format(e)) + except Exception as e: + errors.append(_("Unexpected Exception: {0}").format(e)) + + return errors, warnings, info + + def __check_preserve_version(self, orig): + """Any action that can have a 'preserve' attribute should also + be able to have a 'preserve-version' attribute (that + represents a simple package FMRI release version; no + timestamp, etc., just 'X.n.n...'). + + In the absence of a 'preserve-version' attribute, '0' will be + assumed. + + When performing downgrades, if the installed editable file has + been modified (compared to the proposed packaged version), the + behavior will be as follows: + + if the installed action's 'preserve-version' is greater than + the proposed 'preserve-version', the installed file will be + renamed with '.update' and the proposed file will be + installed. + + if the installed action's 'preserve-version' is equal to or + less than the proposed 'preserve-version', the installed file + content will not be modified.""" + + orig_preserve_ver = version.Version("0") + preserve_ver = version.Version("0") + + try: + ver = orig.attrs["preserve-version"] + orig_preserve_ver = version.Version(ver) + except KeyError: + pass + + try: + ver = self.attrs["preserve-version"] + preserve_ver = version.Version(ver) + except KeyError: + pass + + if orig_preserve_ver > preserve_ver: + # .old is intentionally avoided here to prevent + # accidental collisions with the normal install + # process. + return "renameold.update" + + return True + + def _check_preserve(self, orig, pkgplan, orig_path=None): + """Return the type of preservation needed for this action. + + Returns None if preservation is not defined by the action. + Returns False if it is, but no preservation is necessary. + Returns True for the normal preservation form. Returns one of + the strings 'renameold', 'renameold.update', 'renamenew', + 'legacy', or 'abandon' for each of the respective forms of + preservation. + """ + + # If the logic in this function ever changes, all callers will + # need to be updated to reflect how they interpret return + # values. + + try: + pres_type = self.attrs["preserve"] + except KeyError: + return + + # Should ultimately be conditioned on file type + if "elfhash" in self.attrs: + # Don't allow preserve logic to be applied to elf files; + # if we ever stop tagging elf binaries with this + # attribute, this will need to be updated. + return + + if pres_type == "abandon": + return pres_type + + final_path = self.get_installed_path(pkgplan.image.get_root()) + + # 'legacy' preservation is very different than other forms of + # preservation as it doesn't account for the on-disk state of + # the action's payload. + if pres_type == "legacy": + if not orig: + # This is an initial install or a repair, so + # there's nothing to deliver. 
+ return True + return pres_type + + # If action has been marked with a preserve attribute, the + # hash of the preserved file has changed between versions, + # and the package being installed is older than the package + # that was installed, and the version on disk is different + # than the installed package's original version, then preserve + # the installed file by renaming it. + # + # If pkgplan.origin_fmri isn't set, but there is an orig action, + # then this file is moving between packages and it can't be + # a downgrade since that isn't allowed across rename or obsolete + # boundaries. + is_file = os.path.isfile(final_path) + + # 'install-only' preservation has very specific semantics as + # well; if there's an 'orig' or this is an initial install and + # the file exists, we should not modify the file content. + if pres_type == "install-only": + if orig or is_file: return True + return False + + changed_hash = False + if orig: + # We must use the same hash algorithm when comparing old + # and new actions. Look for the most-preferred common + # hash between old and new. Since the two actions may + # not share a common hash (in which case, we get a tuple + # of 'None' objects) we also need to know the preferred + # hash to use when examining the old action on its own. + ( + common_hash_attr, + common_hash_val, + common_orig_hash_val, + common_hash_func, + ) = digest.get_common_preferred_hash(self, orig) + + hattr, orig_hash_val, orig_hash_func = digest.get_preferred_hash( + orig + ) + + if common_orig_hash_val and common_hash_val: + changed_hash = common_hash_val != common_orig_hash_val + else: + # we don't have a common hash, so we must treat + # this as a changed action + changed_hash = True + + if ( + pkgplan.destination_fmri + and changed_hash + and pkgplan.origin_fmri + and pkgplan.destination_fmri.version + < pkgplan.origin_fmri.version + ): + # Installed, preserved file is for a package + # newer than what will be installed. So check if + # the version on disk is different than what + # was originally delivered, and if so, preserve + # it. + if not is_file: + return False + + preserve_version = self.__check_preserve_version(orig) + if not preserve_version: + return False + + ihash, cdata = misc.get_data_digest( + final_path, hash_func=orig_hash_func + ) + if ihash != orig_hash_val: + return preserve_version - def _check_preserve(self, orig, pkgplan, orig_path=None): - """Return the type of preservation needed for this action. + return True - Returns None if preservation is not defined by the action. - Returns False if it is, but no preservation is necessary. - Returns True for the normal preservation form. Returns one of - the strings 'renameold', 'renameold.update', 'renamenew', - 'legacy', or 'abandon' for each of the respective forms of - preservation. - """ + if orig and orig_path: + # Comparison will be based on a file being moved. + is_file = os.path.isfile(orig_path) + + # If the action has been marked with a preserve attribute, and + # the file exists and has a content hash different from what the + # system expected it to be, then we preserve the original file + # in some way, depending on the value of preserve. + if is_file: + # if we had an action installed, then we know what hash + # function was used to compute it's hash attribute. 
+ if orig: + if not orig_path: + orig_path = final_path + chash, cdata = misc.get_data_digest( + orig_path, hash_func=orig_hash_func + ) + if not orig or chash != orig_hash_val: + if pres_type in ("renameold", "renamenew"): + return pres_type + return True + elif not changed_hash and chash == orig_hash_val: + # If packaged content has not changed since last + # version and on-disk content matches the last + # version, preserve on-disk file. + return True - # If the logic in this function ever changes, all callers will - # need to be updated to reflect how they interpret return - # values. + return False + + # If we're not upgrading, or the file contents have changed, + # retrieve the file and write it to a temporary location. + # For files with content-hash attributes, only write the new file if the + # content-hash changed. + def needsdata(self, orig, pkgplan): + if self.replace_required: + return True + + # import goes here to prevent circular import + from pkg.client.imageconfig import CONTENT_UPDATE_POLICY + + use_content_hash = ( + orig + and pkgplan.image.cfg.get_policy_str(CONTENT_UPDATE_POLICY) + == "when-required" + ) + + # If content update policy allows it, check for a common + # preferred content hash. + if use_content_hash: + ( + content_hash_attr, + content_hash_val, + orig_content_hash_val, + content_hash_func, + ) = digest.get_common_preferred_hash( + self, orig, hash_type=digest.HASH_GELF + ) + + ( + hash_attr, + hash_val, + orig_hash_val, + hash_func, + ) = digest.get_common_preferred_hash(self, orig) + + if not orig: + changed_hash = True + elif orig and (orig_hash_val is None or hash_val is None): + # we have no common hash so we have to treat this as a + # changed action + changed_hash = True + else: + changed_hash = hash_val != orig_hash_val + + if changed_hash and ( + not use_content_hash or content_hash_val != orig_content_hash_val + ): + if "preserve" not in self.attrs or not pkgplan.origin_fmri: + return True + elif orig: + # It's possible that the file content hasn't changed + # for an upgrade case, but the file is missing. This + # ensures that for cases where the mode or some other + # attribute of the file has changed that the file will + # be installed. + path = self.get_installed_path(pkgplan.image.get_root()) + if not os.path.isfile(path): + return True + pres_type = self._check_preserve(orig, pkgplan) + if pres_type not in (None, True, "abandon"): + # Preserved files only need data if they're being + # changed (e.g. "renameold", etc.). + return True + + return False + + def remove(self, pkgplan): + path = self.get_installed_path(pkgplan.image.get_root()) + + # Are we supposed to save this file to restore it elsewhere + # or in another pkg? 'save_file' is set by the imageplan. + save_file = self.attrs.get("save_file") + if save_file: + # 'save_file' contains a tuple of (orig_name, + # remove_file). + remove = save_file[1] + self.save_file(pkgplan.image, path) + if remove != "true": + # File must be left in place (this file is + # likely overlaid and is moving). + return + + if self.attrs.get("preserve") in ("abandon", "install-only"): + return + + if ( + not pkgplan.destination_fmri + and self.attrs.get("preserve", "false").lower() != "false" + ): + # Preserved files are salvaged if they have been + # modified since they were installed and this is + # not an upgrade. 
+ try: + hash_attr, hash_val, hash_func = digest.get_preferred_hash(self) + ihash, cdata = misc.get_data_digest(path, hash_func=hash_func) + if ihash != hash_val: + pkgplan.salvage(path) + # Nothing more to do. + return + except EnvironmentError as e: + if e.errno == errno.ENOENT: + # Already gone; don't care. + return + raise + + # Attempt to remove the file. + rm_exc = None + try: + self.remove_fsobj(pkgplan, path) + return + except Exception as e: + if e.errno != errno.EACCES: + raise + rm_exc = e + + # There are only two likely reasons we couldn't remove the file; + # either because the parent directory isn't writable, or + # because the file is read-only and the OS isn't allowing its + # removal. Assume both and try making both the parent directory + # and the file writable, removing the file, and finally + # resetting the directory to its original mode. + pdir = os.path.dirname(path) + pmode = None + try: + if pdir != pkgplan.image.get_root(): + # Parent directory is not image root (e.g. '/'). + ps = os.lstat(pdir) + pmode = ps.st_mode + os.chmod(pdir, misc.PKG_DIR_MODE) + + # Make file writable and try removing it again; required + # on some operating systems or potentially for some + # filesystems? + os.chmod(path, stat.S_IWRITE | stat.S_IREAD) + self.remove_fsobj(pkgplan, path) + except Exception as e: + # Raise new exception chained to old. + six.raise_from(e, rm_exc) + finally: + # If parent directory wasn't image root, then assume + # mode needs reset. + if pmode is not None: try: - pres_type = self.attrs["preserve"] - except KeyError: - return - - # Should ultimately be conditioned on file type - if "elfhash" in self.attrs: - # Don't allow preserve logic to be applied to elf files; - # if we ever stop tagging elf binaries with this - # attribute, this will need to be updated. - return - - if pres_type == "abandon": - return pres_type - - final_path = self.get_installed_path(pkgplan.image.get_root()) - - # 'legacy' preservation is very different than other forms of - # preservation as it doesn't account for the on-disk state of - # the action's payload. - if pres_type == "legacy": - if not orig: - # This is an initial install or a repair, so - # there's nothing to deliver. - return True - return pres_type - - # If action has been marked with a preserve attribute, the - # hash of the preserved file has changed between versions, - # and the package being installed is older than the package - # that was installed, and the version on disk is different - # than the installed package's original version, then preserve - # the installed file by renaming it. - # - # If pkgplan.origin_fmri isn't set, but there is an orig action, - # then this file is moving between packages and it can't be - # a downgrade since that isn't allowed across rename or obsolete - # boundaries. - is_file = os.path.isfile(final_path) - - # 'install-only' preservation has very specific semantics as - # well; if there's an 'orig' or this is an initial install and - # the file exists, we should not modify the file content. - if pres_type == "install-only": - if orig or is_file: - return True - return False - - changed_hash = False - if orig: - # We must use the same hash algorithm when comparing old - # and new actions. Look for the most-preferred common - # hash between old and new. Since the two actions may - # not share a common hash (in which case, we get a tuple - # of 'None' objects) we also need to know the preferred - # hash to use when examining the old action on its own. 
- common_hash_attr, common_hash_val, \ - common_orig_hash_val, common_hash_func = \ - digest.get_common_preferred_hash(self, orig) - - hattr, orig_hash_val, orig_hash_func = \ - digest.get_preferred_hash(orig) - - if common_orig_hash_val and common_hash_val: - changed_hash = common_hash_val != common_orig_hash_val - else: - # we don't have a common hash, so we must treat - # this as a changed action - changed_hash = True - - if pkgplan.destination_fmri and \ - changed_hash and \ - pkgplan.origin_fmri and \ - pkgplan.destination_fmri.version < pkgplan.origin_fmri.version: - # Installed, preserved file is for a package - # newer than what will be installed. So check if - # the version on disk is different than what - # was originally delivered, and if so, preserve - # it. - if not is_file: - return False - - preserve_version = self.__check_preserve_version(orig) - if not preserve_version: - return False - - ihash, cdata = misc.get_data_digest( - final_path, - hash_func=orig_hash_func) - if ihash != orig_hash_val: - return preserve_version - - return True - - if (orig and orig_path): - # Comparison will be based on a file being moved. - is_file = os.path.isfile(orig_path) - - # If the action has been marked with a preserve attribute, and - # the file exists and has a content hash different from what the - # system expected it to be, then we preserve the original file - # in some way, depending on the value of preserve. - if is_file: - # if we had an action installed, then we know what hash - # function was used to compute it's hash attribute. - if orig: - if not orig_path: - orig_path = final_path - chash, cdata = misc.get_data_digest(orig_path, - hash_func=orig_hash_func) - if not orig or chash != orig_hash_val: - if pres_type in ("renameold", "renamenew"): - return pres_type - return True - elif not changed_hash and chash == orig_hash_val: - # If packaged content has not changed since last - # version and on-disk content matches the last - # version, preserve on-disk file. - return True - - return False - - # If we're not upgrading, or the file contents have changed, - # retrieve the file and write it to a temporary location. - # For files with content-hash attributes, only write the new file if the - # content-hash changed. - def needsdata(self, orig, pkgplan): - if self.replace_required: - return True - - # import goes here to prevent circular import - from pkg.client.imageconfig import CONTENT_UPDATE_POLICY - - use_content_hash = orig and pkgplan.image.cfg.get_policy_str( - CONTENT_UPDATE_POLICY) == "when-required" - - # If content update policy allows it, check for a common - # preferred content hash. - if use_content_hash: - content_hash_attr, content_hash_val, \ - orig_content_hash_val, content_hash_func = \ - digest.get_common_preferred_hash( - self, orig, hash_type=digest.HASH_GELF) - - hash_attr, hash_val, orig_hash_val, hash_func = \ - digest.get_common_preferred_hash(self, orig) - - if not orig: - changed_hash = True - elif orig and (orig_hash_val is None or - hash_val is None): - # we have no common hash so we have to treat this as a - # changed action - changed_hash = True - else: - changed_hash = hash_val != orig_hash_val - - if (changed_hash and - (not use_content_hash or - content_hash_val != orig_content_hash_val)): - if ("preserve" not in self.attrs or - not pkgplan.origin_fmri): - return True - elif orig: - # It's possible that the file content hasn't changed - # for an upgrade case, but the file is missing. 
This - # ensures that for cases where the mode or some other - # attribute of the file has changed that the file will - # be installed. - path = self.get_installed_path(pkgplan.image.get_root()) - if not os.path.isfile(path): - return True - - pres_type = self._check_preserve(orig, pkgplan) - if pres_type not in (None, True, "abandon"): - # Preserved files only need data if they're being - # changed (e.g. "renameold", etc.). - return True - - return False - - def remove(self, pkgplan): - path = self.get_installed_path(pkgplan.image.get_root()) - - # Are we supposed to save this file to restore it elsewhere - # or in another pkg? 'save_file' is set by the imageplan. - save_file = self.attrs.get("save_file") - if save_file: - # 'save_file' contains a tuple of (orig_name, - # remove_file). - remove = save_file[1] - self.save_file(pkgplan.image, path) - if remove != "true": - # File must be left in place (this file is - # likely overlaid and is moving). - return - - if self.attrs.get("preserve") in ("abandon", "install-only"): - return - - if not pkgplan.destination_fmri and \ - self.attrs.get("preserve", "false").lower() != "false": - # Preserved files are salvaged if they have been - # modified since they were installed and this is - # not an upgrade. - try: - hash_attr, hash_val, hash_func = \ - digest.get_preferred_hash(self) - ihash, cdata = misc.get_data_digest(path, - hash_func=hash_func) - if ihash != hash_val: - pkgplan.salvage(path) - # Nothing more to do. - return - except EnvironmentError as e: - if e.errno == errno.ENOENT: - # Already gone; don't care. - return - raise - - # Attempt to remove the file. - rm_exc = None - try: - self.remove_fsobj(pkgplan, path) - return - except Exception as e: - if e.errno != errno.EACCES: - raise - rm_exc = e - - # There are only two likely reasons we couldn't remove the file; - # either because the parent directory isn't writable, or - # because the file is read-only and the OS isn't allowing its - # removal. Assume both and try making both the parent directory - # and the file writable, removing the file, and finally - # resetting the directory to its original mode. - pdir = os.path.dirname(path) - pmode = None - try: - if pdir != pkgplan.image.get_root(): - # Parent directory is not image root (e.g. '/'). - ps = os.lstat(pdir) - pmode = ps.st_mode - os.chmod(pdir, misc.PKG_DIR_MODE) - - # Make file writable and try removing it again; required - # on some operating systems or potentially for some - # filesystems? - os.chmod(path, stat.S_IWRITE|stat.S_IREAD) - self.remove_fsobj(pkgplan, path) + os.chmod(pdir, pmode) except Exception as e: - # Raise new exception chained to old. - six.raise_from(e, rm_exc) - finally: - # If parent directory wasn't image root, then assume - # mode needs reset. - if pmode is not None: - try: - os.chmod(pdir, pmode) - except Exception as e: - # Ignore failure to reset parent mode. - pass - - def generate_indices(self): - """Generates the indices needed by the search dictionary. See - generic.py for a more detailed explanation.""" - - index_list = [ - # this entry shows the hash as the 'index', and the - # file path as the 'value' when showing results when the - # user has searched for the SHA-1 hash. This seems unusual, - # but maintains the behaviour we had for S11. - ("file", "content", self.hash, self.hash), - # This will result in a 2nd row of output when searching for - # the SHA-1 hash, but is consistent with our behaviour for - # the other hash attributes. 
- ("file", "hash", self.hash, None), - ("file", "basename", os.path.basename(self.attrs["path"]), - None), - ("file", "path", os.path.sep + self.attrs["path"], None) - ] - for attr in digest.DEFAULT_HASH_ATTRS: - # We already have an index entry for self.hash; - # we only want hash attributes other than "hash". - hash = self.attrs.get(attr) - if attr != "hash" and hash is not None: - index_list.append(("file", attr, hash, None)) - return index_list - - def save_file(self, image, full_path): - """Save a file for later installation (in same process - invocation, if it exists).""" - - saved_name = image.temporary_file() - try: - misc.copyfile(full_path, saved_name) - except OSError as err: - if err.errno != errno.ENOENT: - raise - - # If the file doesn't exist, it can't be saved, so - # be certain consumers of this information know there - # isn't an original to restore. - saved_name = None - - ip = image.imageplan - ip.saved_files[self.attrs["save_file"][0]] = (self, saved_name) - - def restore_file(self, image): - """restore a previously saved file; return cached action """ - - ip = image.imageplan - orig, saved_name = ip.saved_files[self.attrs["save_file"][0]] - if saved_name is None: - # Nothing to restore; original file is missing. - return - - full_path = self.get_installed_path(image.get_root()) - assert not os.path.exists(full_path) - - misc.copyfile(saved_name, full_path) - os.unlink(saved_name) - - return orig - - def validate(self, fmri=None): - """Performs additional validation of action attributes that - for performance or other reasons cannot or should not be done - during Action object creation. An ActionError exception (or - subclass of) will be raised if any attributes are not valid. - This is primarily intended for use during publication or during - error handling to provide additional diagonostics. - - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action.""" - - errors = generic.Action._validate(self, fmri=fmri, - numeric_attrs=("pkg.csize", "pkg.size"), raise_errors=False, - required_attrs=("owner", "group"), single_attrs=("chash", - "preserve", "overlay", "elfarch", "elfbits", "elfhash", - "original_name", "preserve-version")) - errors.extend(self._validate_fsobj_common()) - - preserve = self.attrs.get("preserve") - preserve_version = self.attrs.get("preserve-version") - - if preserve_version: - if not preserve: - errors.append(("preserve-version", - _("preserve must be 'true' if a " - "preserve-version is specified"))) - - (release, build_release, branch, timestr), ignored = \ - version.Version.split(str(preserve_version)) - if not release: - errors.append(("preserve-version", - _("preserve-version must specify " - "the release"))) - if build_release != "": - errors.append(("preserve-version", - _("preserve-version must specify " - "the release"))) - if branch: - errors.append(("preserve-version", - _("preserve-version must not specify " - "the branch"))) - if timestr: - errors.append(("preserve-version", - _("preserve-version must not specify " - "the timestamp"))) - - if errors: - raise pkg.actions.InvalidActionAttributesError(self, - errors, fmri=fmri) - - if six.PY3: - def __init__(self, data, **attrs): - _common._file_init(self, data, **attrs) + # Ignore failure to reset parent mode. + pass + + def generate_indices(self): + """Generates the indices needed by the search dictionary. 
See + generic.py for a more detailed explanation.""" + + index_list = [ + # this entry shows the hash as the 'index', and the + # file path as the 'value' when showing results when the + # user has searched for the SHA-1 hash. This seems unusual, + # but maintains the behaviour we had for S11. + ("file", "content", self.hash, self.hash), + # This will result in a 2nd row of output when searching for + # the SHA-1 hash, but is consistent with our behaviour for + # the other hash attributes. + ("file", "hash", self.hash, None), + ("file", "basename", os.path.basename(self.attrs["path"]), None), + ("file", "path", os.path.sep + self.attrs["path"], None), + ] + for attr in digest.DEFAULT_HASH_ATTRS: + # We already have an index entry for self.hash; + # we only want hash attributes other than "hash". + hash = self.attrs.get(attr) + if attr != "hash" and hash is not None: + index_list.append(("file", attr, hash, None)) + return index_list + + def save_file(self, image, full_path): + """Save a file for later installation (in same process + invocation, if it exists).""" + + saved_name = image.temporary_file() + try: + misc.copyfile(full_path, saved_name) + except OSError as err: + if err.errno != errno.ENOENT: + raise + + # If the file doesn't exist, it can't be saved, so + # be certain consumers of this information know there + # isn't an original to restore. + saved_name = None + + ip = image.imageplan + ip.saved_files[self.attrs["save_file"][0]] = (self, saved_name) + + def restore_file(self, image): + """restore a previously saved file; return cached action""" + + ip = image.imageplan + orig, saved_name = ip.saved_files[self.attrs["save_file"][0]] + if saved_name is None: + # Nothing to restore; original file is missing. + return + + full_path = self.get_installed_path(image.get_root()) + assert not os.path.exists(full_path) + + misc.copyfile(saved_name, full_path) + os.unlink(saved_name) + + return orig + + def validate(self, fmri=None): + """Performs additional validation of action attributes that + for performance or other reasons cannot or should not be done + during Action object creation. An ActionError exception (or + subclass of) will be raised if any attributes are not valid. + This is primarily intended for use during publication or during + error handling to provide additional diagonostics. 
+ + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action.""" + + errors = generic.Action._validate( + self, + fmri=fmri, + numeric_attrs=("pkg.csize", "pkg.size"), + raise_errors=False, + required_attrs=("owner", "group"), + single_attrs=( + "chash", + "preserve", + "overlay", + "elfarch", + "elfbits", + "elfhash", + "original_name", + "preserve-version", + ), + ) + errors.extend(self._validate_fsobj_common()) + + preserve = self.attrs.get("preserve") + preserve_version = self.attrs.get("preserve-version") + + if preserve_version: + if not preserve: + errors.append( + ( + "preserve-version", + _( + "preserve must be 'true' if a " + "preserve-version is specified" + ), + ) + ) + + ( + release, + build_release, + branch, + timestr, + ), ignored = version.Version.split(str(preserve_version)) + if not release: + errors.append( + ( + "preserve-version", + _("preserve-version must specify " "the release"), + ) + ) + if build_release != "": + errors.append( + ( + "preserve-version", + _("preserve-version must specify " "the release"), + ) + ) + if branch: + errors.append( + ( + "preserve-version", + _("preserve-version must not specify " "the branch"), + ) + ) + if timestr: + errors.append( + ( + "preserve-version", + _("preserve-version must not specify " "the timestamp"), + ) + ) + + if errors: + raise pkg.actions.InvalidActionAttributesError( + self, errors, fmri=fmri + ) + + if six.PY3: + + def __init__(self, data, **attrs): + _common._file_init(self, data, **attrs) + if six.PY2: - FileAction.__init__ = types.MethodType(_common._file_init, None, FileAction) + FileAction.__init__ = types.MethodType(_common._file_init, None, FileAction) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/generic.py b/src/modules/actions/generic.py index 00976b717..7f3c2869e 100644 --- a/src/modules/actions/generic.py +++ b/src/modules/actions/generic.py @@ -33,10 +33,10 @@ import os try: - # Some versions of python don't have these constants. - os.SEEK_SET + # Some versions of python don't have these constants. + os.SEEK_SET except AttributeError: - os.SEEK_SET, os.SEEK_CUR, os.SEEK_END = range(3) + os.SEEK_SET, os.SEEK_CUR, os.SEEK_END = range(3) import six import stat import types @@ -57,1226 +57,1304 @@ # users and groups that may be delivered during the same operation); this # implies that /etc/group and /etc/passwd file ownership needs to be part of # initial contents of those files. -_orderdict = dict((k, i) for i, k in enumerate(( - "set", - "depend", - "group", - "user", - "dir", - "file", - "hardlink", - "link", - "driver", - "unknown", - "license", - "legacy", - "signature" -))) +_orderdict = dict( + (k, i) + for i, k in enumerate( + ( + "set", + "depend", + "group", + "user", + "dir", + "file", + "hardlink", + "link", + "driver", + "unknown", + "license", + "legacy", + "signature", + ) + ) +) # EmptyI for argument defaults; no import to avoid pkg.misc dependency. 
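# Aside: a minimal standalone sketch (not part of the patch) of how an ordinal
# table like _orderdict drives action ordering. Each action name maps to its
# position in the tuple, so sorting names by that ordinal reproduces the
# intended install order; the sample_actions list is invented for illustration.
_order = dict(
    (k, i)
    for i, k in enumerate(
        ("set", "depend", "group", "user", "dir", "file", "hardlink",
         "link", "driver", "unknown", "license", "legacy", "signature")
    )
)
sample_actions = ["file", "dir", "user", "set", "link"]
print(sorted(sample_actions, key=_order.get))
# ['set', 'user', 'dir', 'file', 'link']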
EmptyI = tuple() + def quote_attr_value(s): - """Returns a properly quoted version of the provided string suitable for - use as an attribute value for actions in string form.""" + """Returns a properly quoted version of the provided string suitable for + use as an attribute value for actions in string form.""" + + if " " in s or "'" in s or '"' in s or s == "": + if '"' not in s: + return '"{0}"'.format(s) + elif "'" not in s: + return "'{0}'".format(s) + return '"{0}"'.format(s.replace('"', '\\"')) + return s - if " " in s or "'" in s or "\"" in s or s == "": - if "\"" not in s: - return '"{0}"'.format(s) - elif "'" not in s: - return "'{0}'".format(s) - return '"{0}"'.format(s.replace("\"", "\\\"")) - return s class NSG(type): - """This metaclass automatically assigns a subclass of Action a - namespace_group member if it hasn't already been specified. This is a - convenience for classes which are the sole members of their group and - don't want to hardcode something arbitrary and unique.""" - - __nsg = 0 - def __new__(mcs, name, bases, dict): - nsg = None - - # We only look at subclasses of Action, and we ignore multiple - # inheritance. - if name != "Action" and issubclass(bases[0], Action): - # Iterate through the inheritance chain to see if any - # parent class has a namespace_group member, and grab - # its value. - for c in bases[0].__mro__: - if c == Action: - break - nsg = getattr(c, "namespace_group", None) - if nsg is not None: - break - - # If the class didn't have a namespace_group member - # already, assign one. If we found one in our traversal - # above, use that, otherwise make one up. - if "namespace_group" not in dict: - if not nsg: - nsg = NSG.__nsg - # Prepare for the next class. - NSG.__nsg += 1 - dict["namespace_group"] = nsg - - return type.__new__(mcs, name, bases, dict) - - @staticmethod - def getstate(obj, je_state=None): - """Returns the serialized state of this object in a format - that that can be easily stored using JSON, pickle, etc.""" - return str(obj) - - @staticmethod - def fromstate(state, jd_state=None): - """Allocate a new object using previously serialized state - obtained via getstate().""" - return pkg.actions.fromstr(state) + """This metaclass automatically assigns a subclass of Action a + namespace_group member if it hasn't already been specified. This is a + convenience for classes which are the sole members of their group and + don't want to hardcode something arbitrary and unique.""" + + __nsg = 0 + + def __new__(mcs, name, bases, dict): + nsg = None + + # We only look at subclasses of Action, and we ignore multiple + # inheritance. + if name != "Action" and issubclass(bases[0], Action): + # Iterate through the inheritance chain to see if any + # parent class has a namespace_group member, and grab + # its value. + for c in bases[0].__mro__: + if c == Action: + break + nsg = getattr(c, "namespace_group", None) + if nsg is not None: + break + + # If the class didn't have a namespace_group member + # already, assign one. If we found one in our traversal + # above, use that, otherwise make one up. + if "namespace_group" not in dict: + if not nsg: + nsg = NSG.__nsg + # Prepare for the next class. 
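# Aside: a standalone mirror of the quoting rules in quote_attr_value() above,
# showing the intended output for a few representative values; the sample
# strings are invented for illustration.
def _quote(s):
    # Prefer double quotes, fall back to single quotes, else escape.
    if " " in s or "'" in s or '"' in s or s == "":
        if '"' not in s:
            return '"{0}"'.format(s)
        elif "'" not in s:
            return "'{0}'".format(s)
        return '"{0}"'.format(s.replace('"', '\\"'))
    return s

print(_quote("usr/bin/ls"))             # usr/bin/ls (no quoting needed)
print(_quote("a value with spaces"))    # "a value with spaces"
print(_quote(""))                       # ""
print(_quote('say "hi"'))               # 'say "hi"'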
+ NSG.__nsg += 1 + dict["namespace_group"] = nsg + + return type.__new__(mcs, name, bases, dict) + + @staticmethod + def getstate(obj, je_state=None): + """Returns the serialized state of this object in a format + that that can be easily stored using JSON, pickle, etc.""" + return str(obj) + + @staticmethod + def fromstate(state, jd_state=None): + """Allocate a new object using previously serialized state + obtained via getstate().""" + return pkg.actions.fromstr(state) # metaclass-assignment; pylint: disable=W1623 @six.add_metaclass(NSG) class Action(object): - """Class representing a generic packaging object. - - An Action is a very simple wrapper around two dictionaries: a named set - of data streams and a set of attributes. Data streams generally - represent files on disk, and attributes represent metadata about those - files. - """ - - __slots__ = ["attrs", "data"] - - # 'name' is the name of the action, as specified in a manifest. - name = "generic" - # 'key_attr' is the name of the attribute whose value must be unique in - # the namespace of objects represented by a particular action. For - # instance, a file's key_attr would be its pathname. Or a driver's - # key_attr would be the driver name. When 'key_attr' is None, it means - # that all attributes of the action are distinguishing. - key_attr = None - # 'globally_identical' is True if all actions representing a single - # object on a system must be identical. - globally_identical = False - # 'refcountable' is True if the action type can safely be delivered - # multiple times. - refcountable = False - # 'namespace_group' is a string whose value is shared by actions which - # share a common namespace. As a convenience to the classes which are - # the sole members of their group, this is set to a non-None value for - # subclasses by the NSG metaclass. - namespace_group = None - # 'ordinality' is a numeric value that is used during action comparison - # to determine action sorting. - ordinality = _orderdict["unknown"] - # 'unique_attrs' is a tuple listing the attributes which must be - # identical in order for an action to be safely delivered multiple times - # (for those that can be). - unique_attrs = () - - # The version of signature used. - sig_version = 0 - # Most types of actions do not have a payload. - has_payload = False - - # Python 3 will ignore the __metaclass__ field, but it's still useful - # for class attribute access. - __metaclass__ = NSG - - # __init__ is provided as a native function (see end of class - # declaration). - - def set_data(self, data): - """This function sets the data field of the action. - - The "data" parameter is the file to use to set the data field. - It can be a string which is the path to the file, a function - which provides the file when called, or a file handle to the - file.""" - - if data is None: - self.data = None - return - - if isinstance(data, six.string_types): - if not os.path.exists(data): - raise pkg.actions.ActionDataError( - _("No such file: '{0}'.").format(data), - path=data) - elif os.path.isdir(data): - raise pkg.actions.ActionDataError( - _("'{0}' is not a file.").format(data), - path=data) - - def file_opener(): - return open(data, "rb") - self.data = file_opener - if "pkg.size" not in self.attrs: - try: - fs = os.stat(data) - self.attrs["pkg.size"] = str(fs.st_size) - except EnvironmentError as e: - raise \ - pkg.actions.ActionDataError( - e, path=data) - return - - if hasattr(data, "__call__"): - # Data is not None, and is callable. 
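# Aside: a small self-contained analogue of the NSG metaclass idea, assuming a
# made-up base class named Base. Each subclass receives an automatically
# numbered namespace_group unless it inherits or declares one itself.
class AutoGroup(type):
    _next = 0

    def __new__(mcs, name, bases, dct):
        if name != "Base" and "namespace_group" not in dct:
            # Inherit a parent's group if one exists, else mint a new one.
            nsg = getattr(bases[0], "namespace_group", None)
            if nsg is None:
                nsg = AutoGroup._next
                AutoGroup._next += 1
            dct["namespace_group"] = nsg
        return type.__new__(mcs, name, bases, dct)

class Base(metaclass=AutoGroup):
    pass

class FileLike(Base):
    pass

class LinkLike(FileLike):
    pass

class DriverLike(Base):
    pass

print(FileLike.namespace_group, LinkLike.namespace_group, DriverLike.namespace_group)
# 0 0 1 - LinkLike shares FileLike's group; DriverLike gets its own.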
- self.data = data - return - - if "pkg.size" in self.attrs: - self.data = lambda: data - return - + """Class representing a generic packaging object. + + An Action is a very simple wrapper around two dictionaries: a named set + of data streams and a set of attributes. Data streams generally + represent files on disk, and attributes represent metadata about those + files. + """ + + __slots__ = ["attrs", "data"] + + # 'name' is the name of the action, as specified in a manifest. + name = "generic" + # 'key_attr' is the name of the attribute whose value must be unique in + # the namespace of objects represented by a particular action. For + # instance, a file's key_attr would be its pathname. Or a driver's + # key_attr would be the driver name. When 'key_attr' is None, it means + # that all attributes of the action are distinguishing. + key_attr = None + # 'globally_identical' is True if all actions representing a single + # object on a system must be identical. + globally_identical = False + # 'refcountable' is True if the action type can safely be delivered + # multiple times. + refcountable = False + # 'namespace_group' is a string whose value is shared by actions which + # share a common namespace. As a convenience to the classes which are + # the sole members of their group, this is set to a non-None value for + # subclasses by the NSG metaclass. + namespace_group = None + # 'ordinality' is a numeric value that is used during action comparison + # to determine action sorting. + ordinality = _orderdict["unknown"] + # 'unique_attrs' is a tuple listing the attributes which must be + # identical in order for an action to be safely delivered multiple times + # (for those that can be). + unique_attrs = () + + # The version of signature used. + sig_version = 0 + # Most types of actions do not have a payload. + has_payload = False + + # Python 3 will ignore the __metaclass__ field, but it's still useful + # for class attribute access. + __metaclass__ = NSG + + # __init__ is provided as a native function (see end of class + # declaration). + + def set_data(self, data): + """This function sets the data field of the action. + + The "data" parameter is the file to use to set the data field. + It can be a string which is the path to the file, a function + which provides the file when called, or a file handle to the + file.""" + + if data is None: + self.data = None + return + + if isinstance(data, six.string_types): + if not os.path.exists(data): + raise pkg.actions.ActionDataError( + _("No such file: '{0}'.").format(data), path=data + ) + elif os.path.isdir(data): + raise pkg.actions.ActionDataError( + _("'{0}' is not a file.").format(data), path=data + ) + + def file_opener(): + return open(data, "rb") + + self.data = file_opener + if "pkg.size" not in self.attrs: try: - sz = data.size - except AttributeError: - try: - try: - sz = os.fstat(data.fileno()).st_size - except (AttributeError, TypeError): - try: - try: - data.seek(0, - os.SEEK_END) - sz = data.tell() - data.seek(0) - except (AttributeError, - TypeError): - d = data.read() - sz = len(d) - data = BytesIO(d) - except (AttributeError, TypeError): - # Raw data was provided; fake a - # file object. - sz = len(data) - data = BytesIO(data) - except EnvironmentError as e: - raise pkg.actions.ActionDataError(e) - - self.attrs["pkg.size"] = str(sz) - self.data = lambda: data - - def __str__(self): - """Serialize the action into manifest form. 
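# Aside: set_data() accepts a path string, a callable, a file-like object or
# raw bytes. The helper below is a rough standalone analogue of the size
# fallback chain only (.size attribute, fstat of a real file, seek/tell, then
# len of raw data); it does not use any pkg.* machinery.
import io
import os

def payload_size(data):
    try:
        return data.size
    except AttributeError:
        pass
    try:
        return os.fstat(data.fileno()).st_size
    except (AttributeError, TypeError, io.UnsupportedOperation):
        pass
    try:
        data.seek(0, os.SEEK_END)
        size = data.tell()
        data.seek(0)
        return size
    except (AttributeError, TypeError):
        # Raw data was provided.
        return len(data)

print(payload_size(io.BytesIO(b"hello")))   # 5
print(payload_size(b"hello world"))         # 11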
- - The form is the name, followed by the SHA1 hash, if it exists, - (this use of a positional SHA1 hash is deprecated, with - pkg.*hash.* attributes being preferred over positional hashes) - followed by attributes in the form 'key=value'. All fields are - space-separated; fields with spaces in the values are quoted. - - Note that an object with a datastream may have been created in - such a way that the hash field is not populated, or not - populated with real data. The action classes do not guarantee - that at the time that __str__() is called, the hash is properly - computed. This may need to be done externally. - """ - - sattrs = list(self.attrs.keys()) - out = self.name + fs = os.stat(data) + self.attrs["pkg.size"] = str(fs.st_size) + except EnvironmentError as e: + raise pkg.actions.ActionDataError(e, path=data) + return + + if hasattr(data, "__call__"): + # Data is not None, and is callable. + self.data = data + return + + if "pkg.size" in self.attrs: + self.data = lambda: data + return + + try: + sz = data.size + except AttributeError: + try: try: - h = self.hash - if h: - if "=" not in h and " " not in h and \ - '"' not in h: - out += " " + h - else: - sattrs.append("hash") - except AttributeError: - # No hash to stash. - pass - - # Sort so that we get consistent action attribute ordering. - # We pay a performance penalty to do so, but it seems worth it. - sattrs.sort() - - for k in sattrs: - # Octal literal in Python 3 begins with "0o", such as - # "0o755", but we want to keep "0755" in the output. - if k == "mode" and isinstance(self.attrs[k], str) and \ - self.attrs[k].startswith("0o"): - self.attrs[k] = "0" + self.attrs[k][2:] + sz = os.fstat(data.fileno()).st_size + except (AttributeError, TypeError): + try: try: - v = self.attrs[k] - except KeyError: - # If we can't find the attribute, it must be the - # hash. 'h' will only be in scope if the block - # at the start succeeded. - v = h - - if type(v) is list or type(v) is set: - out += " " + " ".join([ - "=".join((k, quote_attr_value(lmt))) - for lmt in v - ]) - # Quote values containing whitespaces or macros - elif " " in v or "'" in v or "\"" in v or v == "" or "$(" in v: - if "\"" not in v: - out += " " + k + "=\"" + v + "\"" - elif "'" not in v: - out += " " + k + "='" + v + "'" - else: - out += " " + k + "=\"" + \ - v.replace("\"", "\\\"") + "\"" - else: - out += " " + k + "=" + v - - return out - - def __repr__(self): - return "<{0} object at {1:#x}: {2}>".format(self.__class__, - id(self), self) - - def sig_str(self, a, ver): - """Create a stable string representation of an action that - is deterministic in its creation. If creating a string from an - action is non-deterministic, then manifest signing cannot work. - - The parameter "a" is the signature action that's going to use - the string produced. It's needed for the signature string - action, and is here to keep the method signature the same. - """ - - # Any changes to this function or any subclasses sig_str mean - # Action.sig_version must be incremented. - - if ver != Action.sig_version: - raise apx.UnsupportedSignatureVersion(ver, sig=self) - - out = self.name - if hasattr(self, "hash") and self.hash is not None: - out += " " + self.hash - - def q(s): - if " " in s or "'" in s or "\"" in s or s == "" or "$(" in s: - if "\"" not in s: - return '"{0}"'.format(s) - elif "'" not in s: - return "'{0}'".format(s) - else: - return '"{0}"'.format( - s.replace("\"", "\\\"")) - else: - return s - - # Sort so that we get consistent action attribute ordering. 
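# Aside: in manifest form an action serializes as the action name, an optional
# positional hash, then sorted key=value attributes, quoting values that
# contain spaces or quotes. The helper and sample values below are invented to
# illustrate just the attribute-formatting rule.
def fmt_attr(k, v):
    if " " in v or "'" in v or '"' in v or v == "" or "$(" in v:
        if '"' not in v:
            return '{0}="{1}"'.format(k, v)
        if "'" not in v:
            return "{0}='{1}'".format(k, v)
        return '{0}="{1}"'.format(k, v.replace('"', '\\"'))
    return "{0}={1}".format(k, v)

print(fmt_attr("path", "usr/bin/example"))         # path=usr/bin/example
print(fmt_attr("pkg.summary", "An example tool"))  # pkg.summary="An example tool"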
- # We pay a performance penalty to do so, but it seems worth it. - for k in sorted(self.attrs.keys()): - v = self.attrs[k] - # Octal literal in Python 3 begins with "0o", such as - # "0o755", but we want to keep "0755" in the output. - if k == "mode" and v.startswith("0o"): - self.attrs[k] = "0" + v[2:] - if type(v) is list: - out += " " + " ".join([ - "{0}={1}".format(k, q(lmt)) for lmt in sorted(v) - ]) - elif " " in v or "'" in v or "\"" in v or v == "" or "$(" in v: - if "\"" not in v: - out += " " + k + "=\"" + v + "\"" - elif "'" not in v: - out += " " + k + "='" + v + "'" - else: - out += " " + k + "=\"" + \ - v.replace("\"", "\\\"") + "\"" - else: - out += " " + k + "=" + v - - return out - - def __eq__(self, other): - if self.name == other.name and \ - getattr(self, "hash", None) == \ - getattr(other, "hash", None) and \ - self.attrs == other.attrs: - return True - return False + data.seek(0, os.SEEK_END) + sz = data.tell() + data.seek(0) + except (AttributeError, TypeError): + d = data.read() + sz = len(d) + data = BytesIO(d) + except (AttributeError, TypeError): + # Raw data was provided; fake a + # file object. + sz = len(data) + data = BytesIO(data) + except EnvironmentError as e: + raise pkg.actions.ActionDataError(e) + + self.attrs["pkg.size"] = str(sz) + self.data = lambda: data + + def __str__(self): + """Serialize the action into manifest form. + + The form is the name, followed by the SHA1 hash, if it exists, + (this use of a positional SHA1 hash is deprecated, with + pkg.*hash.* attributes being preferred over positional hashes) + followed by attributes in the form 'key=value'. All fields are + space-separated; fields with spaces in the values are quoted. + + Note that an object with a datastream may have been created in + such a way that the hash field is not populated, or not + populated with real data. The action classes do not guarantee + that at the time that __str__() is called, the hash is properly + computed. This may need to be done externally. + """ - def __ne__(self, other): - if self.name == other.name and \ - getattr(self, "hash", None) == \ - getattr(other, "hash", None) and \ - self.attrs == other.attrs: - return False - return True + sattrs = list(self.attrs.keys()) + out = self.name + try: + h = self.hash + if h: + if "=" not in h and " " not in h and '"' not in h: + out += " " + h + else: + sattrs.append("hash") + except AttributeError: + # No hash to stash. + pass + + # Sort so that we get consistent action attribute ordering. + # We pay a performance penalty to do so, but it seems worth it. + sattrs.sort() + + for k in sattrs: + # Octal literal in Python 3 begins with "0o", such as + # "0o755", but we want to keep "0755" in the output. + if ( + k == "mode" + and isinstance(self.attrs[k], str) + and self.attrs[k].startswith("0o") + ): + self.attrs[k] = "0" + self.attrs[k][2:] + try: + v = self.attrs[k] + except KeyError: + # If we can't find the attribute, it must be the + # hash. 'h' will only be in scope if the block + # at the start succeeded. 
+ v = h + + if type(v) is list or type(v) is set: + out += " " + " ".join( + ["=".join((k, quote_attr_value(lmt))) for lmt in v] + ) + # Quote values containing whitespaces or macros + elif " " in v or "'" in v or '"' in v or v == "" or "$(" in v: + if '"' not in v: + out += " " + k + '="' + v + '"' + elif "'" not in v: + out += " " + k + "='" + v + "'" + else: + out += " " + k + '="' + v.replace('"', '\\"') + '"' + else: + out += " " + k + "=" + v - def __hash__(self): - return hash(id(self)) - - def compare(self, other): - return (id(self) > id(other)) - (id(self) < id(other)) - - def __lt__(self, other): - if self.ordinality == other.ordinality: - if self.compare(other) < 0: # often subclassed - return True - else: - return False - return self.ordinality < other.ordinality - - def __gt__(self, other): - if self.ordinality == other.ordinality: - if self.compare(other) > 0: # often subclassed - return True - else: - return False - return self.ordinality > other.ordinality - - def __le__(self, other): - return self == other or self < other - - def __ge__(self, other): - return self == other or self > other - - def different(self, other, pkgplan=None, cmp_policy=None): - """Returns True if other represents a non-ignorable change from - self. By default, this means two actions are different if any - of their attributes are different. - - When cmp_policy is CMP_UNSIGNED, check the unsigned versions - of hashes instead of signed versions of hashes on both actions. - This prevents comparing all hash attributes as simple value - comparisons, and instead compares only non-hash attributes, - then tests the most preferred hash for equivalence. When - cmp_policy is CMP_ALL, compare using all attributes. - """ - - if self.has_payload != other.has_payload: - # Comparing different action types. - return True + return out - sattrs = self.attrs - oattrs = other.attrs - - # Are all attributes identical? Most actions don't change, so - # a simple equality comparison should be sufficient. - if sattrs == oattrs: - if self.has_payload: - # If payload present, must also compare some - # object attributes. - if self.hash == other.hash: - return False - else: - return False - - # If action has payload, perform hash comparison first. For - # actions with a payload, hash attributes usually change, but - # other attributes do not. - hash_type = digest.HASH - if self.has_payload: - if "elfarch" in self.attrs and "elfarch" in other.attrs: - # If both actions are for elf files, determine - # if we should compare based on elf content - # hash. - if cmp_policy == CMP_UNSIGNED and not pkgplan: - # If caller requested unsigned - # comparison, and no policy is - # available, compare based on elf - # content hash. - hash_type = digest.HASH_GELF - elif pkgplan: - # Avoid circular import. - from pkg.client.imageconfig \ - import CONTENT_UPDATE_POLICY - - if pkgplan.image.cfg.get_policy_str( - CONTENT_UPDATE_POLICY) == \ - "when-required": - # If policy is available and - # allows it, then compare based - # on elf content hash. - hash_type = digest.HASH_GELF - - # digest.get_common_preferred_hash() tries to return the - # most preferred hash attribute and falls back to - # returning the action.hash values if there are no other - # common hash attributes, and will throw an - # AttributeError if one or the other actions don't have - # an action.hash attribute. 
- try: - hash_attr, shash, ohash, hash_func = \ - digest.get_common_preferred_hash( - self, other, hash_type=hash_type, - cmp_policy=cmp_policy) - if shash != ohash: - return True - # If there's no common preferred hash, we have - # to treat these actions as different. - if shash is None and ohash is None: - return True - except AttributeError: - # If action.hash is set on exactly one of self - # and other, then we're trying to compare - # actions of disparate subclasses. - if hasattr(self, "hash") ^ hasattr(other, - "hash"): - raise AssertionError( - "attempt to compare a " - "{0} action to a {1} action".format( - self.name, other.name)) - - if self.has_payload and cmp_policy != CMP_ALL: - sset = frozenset( - a for a in sattrs if not digest.is_hash_attr(a)) - oset = frozenset( - a for a in oattrs if not digest.is_hash_attr(a)) - else: - sset = frozenset(sattrs) - oset = frozenset(oattrs) + def __repr__(self): + return "<{0} object at {1:#x}: {2}>".format( + self.__class__, id(self), self + ) - # If hashes were equal or not applicable, then compare remaining - # attributes. - if sset.symmetric_difference(oset): - return True + def sig_str(self, a, ver): + """Create a stable string representation of an action that + is deterministic in its creation. If creating a string from an + action is non-deterministic, then manifest signing cannot work. - for a in sset: - x = sattrs[a] - y = oattrs[a] - if x != y: - if len(x) == len(y) and \ - type(x) is list and type(y) is list: - if sorted(x) != sorted(y): - return True - else: - return True + The parameter "a" is the signature action that's going to use + the string produced. It's needed for the signature string + action, and is here to keep the method signature the same. + """ - return False + # Any changes to this function or any subclasses sig_str mean + # Action.sig_version must be incremented. - def differences(self, other): - """Returns the attributes that have different values between - other and self.""" - sset = set(self.attrs.keys()) - oset = set(other.attrs.keys()) - l = sset.symmetric_difference(oset) - for k in sset & oset: # over attrs in both dicts - if type(self.attrs[k]) == list and \ - type(other.attrs[k]) == list: - if sorted(self.attrs[k]) != sorted(other.attrs[k]): - l.add(k) - elif self.attrs[k] != other.attrs[k]: - l.add(k) - return (l) - - def consolidate_attrs(self): - """Removes duplicate values from values which are lists.""" - for k in self.attrs: - if isinstance(self.attrs[k], list): - self.attrs[k] = list(set(self.attrs[k])) - - def generate_indices(self): - """Generate the information needed to index this action. - - This method, and the overriding methods in subclasses, produce - a list of four-tuples. The tuples are of the form - (action_name, key, token, full value). action_name is the - string representation of the kind of action generating the - tuple. 'file' and 'depend' are two examples. It is required to - not be None. Key is the string representation of the name of - the attribute being indexed. Examples include 'basename' and - 'path'. Token is the token to be searched against. Full value - is the value to display to the user in the event this token - matches their query. This is useful for things like categories - where what matched the query may be a substring of what the - desired user output is. - """ - - # Indexing based on the SHA-1 hash is enough for the generic - # case. 
- if hasattr(self, "hash"): - return [ - (self.name, "content", self.hash, self.hash), - ] - return [] - - def get_installed_path(self, img_root): - """Given an image root, return the installed path of the action - if it has a installable payload (i.e. 'path' attribute).""" - try: - return os.path.normpath(os.path.join(img_root, - self.attrs["path"])) - except KeyError: - return - - def distinguished_name(self): - """ Return the distinguishing name for this action, - preceded by the type of the distinguishing name. For - example, for a file action, 'path' might be the - key_attr. So, the distinguished name might be - "path: usr/lib/libc.so.1". - """ - - if self.key_attr is None: - return str(self) - return "{0}: {1}".format( - self.name, self.attrs.get(self.key_attr, "???")) - - def makedirs(self, path, **kw): - """Make directory specified by 'path' with given permissions, as - well as all missing parent directories. Permissions are - specified by the keyword arguments 'mode', 'uid', and 'gid'. - - The difference between this and os.makedirs() is that the - permissions specify only those of the leaf directory. Missing - parent directories inherit the permissions of the deepest - existing directory. The leaf directory will also inherit any - permissions not explicitly set.""" - - # generate the components of the path. The first - # element will be empty since all absolute paths - # always start with a root specifier. - pathlist = portable.split_path(path) - - # Fill in the first path with the root of the filesystem - # (this ends up being something like C:\ on windows systems, - # and "/" on unix. - pathlist[0] = portable.get_root(path) - - g = enumerate(pathlist) - for i, e in g: - # os.path.isdir() follows links, which isn't - # desirable here. - p = os.path.join(*pathlist[:i + 1]) - try: - fs = os.lstat(p) - except OSError as e: - if e.errno == errno.ENOENT: - break - raise - - if not stat.S_ISDIR(fs.st_mode): - if p == path: - # Allow caller to handle target by - # letting the operation continue, - # and whatever error is encountered - # being raised to the caller. - break - - err_txt = _("Unable to create {path}; a " - "parent directory {p} has been replaced " - "with a file or link. Please restore the " - "parent directory and try again.").format( - **locals()) - raise apx.ActionExecutionError(self, - details=err_txt, error=e, - fmri=kw.get("fmri")) + if ver != Action.sig_version: + raise apx.UnsupportedSignatureVersion(ver, sig=self) + + out = self.name + if hasattr(self, "hash") and self.hash is not None: + out += " " + self.hash + + def q(s): + if " " in s or "'" in s or '"' in s or s == "" or "$(" in s: + if '"' not in s: + return '"{0}"'.format(s) + elif "'" not in s: + return "'{0}'".format(s) else: - # XXX Because the filelist codepath may create - # directories with incorrect permissions (see - # pkgtarfile.py), we need to correct those permissions - # here. Note that this solution relies on all - # intermediate directories being explicitly created by - # the packaging system; otherwise intermediate - # directories will not get their permissions corrected. 
- fs = os.lstat(path) - mode = kw.get("mode", fs.st_mode) - uid = kw.get("uid", fs.st_uid) - gid = kw.get("gid", fs.st_gid) - try: - if mode != fs.st_mode: - os.chmod(path, mode) - if uid != fs.st_uid or gid != fs.st_gid: - portable.chown(path, uid, gid) - except OSError as e: - if e.errno != errno.EPERM and \ - e.errno != errno.ENOSYS: - raise - return - - fs = os.stat(os.path.join(*pathlist[:i])) - for i, e in g: - p = os.path.join(*pathlist[:i]) - try: - os.mkdir(p, fs.st_mode) - except OSError as e: - if e.errno != errno.ENOTDIR: - raise - err_txt = _("Unable to create {path}; a " - "parent directory {p} has been replaced " - "with a file or link. Please restore the " - "parent directory and try again.").format( - **locals()) - raise apx.ActionExecutionError(self, - details=err_txt, error=e, - fmri=kw.get("fmri")) - - os.chmod(p, fs.st_mode) - try: - portable.chown(p, fs.st_uid, fs.st_gid) - except OSError as e: - if e.errno != errno.EPERM: - raise - - # Create the leaf with any requested permissions, substituting - # missing perms with the parent's perms. - mode = kw.get("mode", fs.st_mode) - uid = kw.get("uid", fs.st_uid) - gid = kw.get("gid", fs.st_gid) - os.mkdir(path, mode) - os.chmod(path, mode) - try: - portable.chown(path, uid, gid) - except OSError as e: - if e.errno != errno.EPERM: - raise - - def get_varcet_keys(self): - """Return the names of any facet or variant tags in this - action.""" - - # Hot path; grab reference to attrs and use list comprehensions - # to construct the results. This is faster than iterating over - # attrs once and appending to two lists separately. - attrs = self.attrs - return [k for k in attrs if k[:8] == "variant."], \ - [k for k in attrs if k[:6] == "facet."] - - def get_variant_template(self): - """Return the VariantCombinationTemplate that the variant tags - of this action define.""" - - return variant.VariantCombinationTemplate(dict(( - (v, self.attrs[v]) for v in self.get_varcet_keys()[0] - ))) - - def strip(self, preserve=EmptyDict): - """Strip actions of attributes which are unnecessary once - those actions have been installed in an image. Stripped - actions are saved in an images stripped action cache and used - for conflicting actions checks during image planning - operations.""" - - for key in list(self.attrs.keys()): - # strip out variant and facet information - if key[:8] == "variant." or key[:6] == "facet.": - del self.attrs[key] - continue - # keep unique attributes - if not self.unique_attrs or key in self.unique_attrs: - continue - # keep file action overlay attributes - if self.name == "file" and key == "overlay": - continue - # keep file action overlay-attributes attributes - if self.name == "file" and key == "overlay-attributes": - continue - # keep specified keys - if key in preserve.get(self.name, []): - continue - # keep link/hardlink action mediator attributes - if (self.name == "link" or self.name == "hardlink") \ - and key[:8] == "mediator": - continue - del self.attrs[key] - - def strip_variants(self): - """Remove all variant tags from the attrs dictionary.""" - - for k in list(self.attrs.keys()): - if k.startswith("variant."): - del self.attrs[k] - - def verify(self, img, **args): - """Returns a tuple of lists of the form (errors, warnings, - info). 
The error list will be empty if the action has been - correctly installed in the given image.""" - return [], [], [] - - def _validate_fsobj_common(self): - """Private, common validation logic for filesystem objects that - returns a list of tuples of the form (attr_name, error_message). - """ - - errors = [] - - bad_mode = False - raw_mode = self.attrs.get("mode") - if not raw_mode or isinstance(raw_mode, list): - bad_mode = True + return '"{0}"'.format(s.replace('"', '\\"')) + else: + return s + + # Sort so that we get consistent action attribute ordering. + # We pay a performance penalty to do so, but it seems worth it. + for k in sorted(self.attrs.keys()): + v = self.attrs[k] + # Octal literal in Python 3 begins with "0o", such as + # "0o755", but we want to keep "0755" in the output. + if k == "mode" and v.startswith("0o"): + self.attrs[k] = "0" + v[2:] + if type(v) is list: + out += " " + " ".join( + ["{0}={1}".format(k, q(lmt)) for lmt in sorted(v)] + ) + elif " " in v or "'" in v or '"' in v or v == "" or "$(" in v: + if '"' not in v: + out += " " + k + '="' + v + '"' + elif "'" not in v: + out += " " + k + "='" + v + "'" else: - mlen = len(raw_mode) - # Common case for our packages is 4 so place that first. - if not (mlen == 4 or mlen == 3 or mlen == 5): - bad_mode = True - elif mlen == 5 and raw_mode[0] != "0": - bad_mode = True - - # The group, mode, and owner attributes are intentionally only - # required during publication as it is anticipated that the - # there will eventually be defaults for these (possibly parent - # directory, etc.). By only requiring these attributes here, - # it prevents publication of packages for which no default - # currently exists, while permitting future changes to remove - # that limitaiton and use sane defaults. 
- if not bad_mode: - try: - mode = str(int(raw_mode, 8)) - except (TypeError, ValueError): - bad_mode = True - else: - bad_mode = mode == "" - - if bad_mode: - if not raw_mode: - errors.append(("mode", _("mode is required; " - "value must be of the form '644', " - "'0644', or '04755'."))) - elif isinstance(raw_mode, list): - errors.append(("mode", _("mode may only be " - "specified once"))) - else: - errors.append(("mode", _("'{0}' is not a valid " - "mode; value must be of the form '644', " - "'0644', or '04755'.").format(raw_mode))) + out += " " + k + '="' + v.replace('"', '\\"') + '"' + else: + out += " " + k + "=" + v + + return out + + def __eq__(self, other): + if ( + self.name == other.name + and getattr(self, "hash", None) == getattr(other, "hash", None) + and self.attrs == other.attrs + ): + return True + return False + + def __ne__(self, other): + if ( + self.name == other.name + and getattr(self, "hash", None) == getattr(other, "hash", None) + and self.attrs == other.attrs + ): + return False + return True + + def __hash__(self): + return hash(id(self)) + + def compare(self, other): + return (id(self) > id(other)) - (id(self) < id(other)) + + def __lt__(self, other): + if self.ordinality == other.ordinality: + if self.compare(other) < 0: # often subclassed + return True + else: + return False + return self.ordinality < other.ordinality - try: - owner = self.attrs.get("owner", "").rstrip() - except AttributeError: - errors.append(("owner", _("owner may only be specified " - "once"))) + def __gt__(self, other): + if self.ordinality == other.ordinality: + if self.compare(other) > 0: # often subclassed + return True + else: + return False + return self.ordinality > other.ordinality - try: - group = self.attrs.get("group", "").rstrip() - except AttributeError: - errors.append(("group", _("group may only be specified " - "once"))) + def __le__(self, other): + return self == other or self < other - return errors + def __ge__(self, other): + return self == other or self > other - def get_fsobj_uid_gid(self, pkgplan, fmri): - """Returns a tuple of the form (owner, group) containing the uid - and gid of the filesystem object. If the attributes are missing - or invalid, an InvalidActionAttributesError exception will be - raised.""" + def different(self, other, pkgplan=None, cmp_policy=None): + """Returns True if other represents a non-ignorable change from + self. By default, this means two actions are different if any + of their attributes are different. + + When cmp_policy is CMP_UNSIGNED, check the unsigned versions + of hashes instead of signed versions of hashes on both actions. + This prevents comparing all hash attributes as simple value + comparisons, and instead compares only non-hash attributes, + then tests the most preferred hash for equivalence. When + cmp_policy is CMP_ALL, compare using all attributes. + """ + + if self.has_payload != other.has_payload: + # Comparing different action types. + return True + + sattrs = self.attrs + oattrs = other.attrs + + # Are all attributes identical? Most actions don't change, so + # a simple equality comparison should be sufficient. + if sattrs == oattrs: + if self.has_payload: + # If payload present, must also compare some + # object attributes. + if self.hash == other.hash: + return False + else: + return False + + # If action has payload, perform hash comparison first. For + # actions with a payload, hash attributes usually change, but + # other attributes do not. 
+ hash_type = digest.HASH + if self.has_payload: + if "elfarch" in self.attrs and "elfarch" in other.attrs: + # If both actions are for elf files, determine + # if we should compare based on elf content + # hash. + if cmp_policy == CMP_UNSIGNED and not pkgplan: + # If caller requested unsigned + # comparison, and no policy is + # available, compare based on elf + # content hash. + hash_type = digest.HASH_GELF + elif pkgplan: + # Avoid circular import. + from pkg.client.imageconfig import CONTENT_UPDATE_POLICY + + if ( + pkgplan.image.cfg.get_policy_str(CONTENT_UPDATE_POLICY) + == "when-required" + ): + # If policy is available and + # allows it, then compare based + # on elf content hash. + hash_type = digest.HASH_GELF + + # digest.get_common_preferred_hash() tries to return the + # most preferred hash attribute and falls back to + # returning the action.hash values if there are no other + # common hash attributes, and will throw an + # AttributeError if one or the other actions don't have + # an action.hash attribute. + try: + ( + hash_attr, + shash, + ohash, + hash_func, + ) = digest.get_common_preferred_hash( + self, other, hash_type=hash_type, cmp_policy=cmp_policy + ) + if shash != ohash: + return True + # If there's no common preferred hash, we have + # to treat these actions as different. + if shash is None and ohash is None: + return True + except AttributeError: + # If action.hash is set on exactly one of self + # and other, then we're trying to compare + # actions of disparate subclasses. + if hasattr(self, "hash") ^ hasattr(other, "hash"): + raise AssertionError( + "attempt to compare a " + "{0} action to a {1} action".format( + self.name, other.name + ) + ) + + if self.has_payload and cmp_policy != CMP_ALL: + sset = frozenset(a for a in sattrs if not digest.is_hash_attr(a)) + oset = frozenset(a for a in oattrs if not digest.is_hash_attr(a)) + else: + sset = frozenset(sattrs) + oset = frozenset(oattrs) + + # If hashes were equal or not applicable, then compare remaining + # attributes. + if sset.symmetric_difference(oset): + return True + + for a in sset: + x = sattrs[a] + y = oattrs[a] + if x != y: + if len(x) == len(y) and type(x) is list and type(y) is list: + if sorted(x) != sorted(y): + return True + else: + return True + + return False + + def differences(self, other): + """Returns the attributes that have different values between + other and self.""" + sset = set(self.attrs.keys()) + oset = set(other.attrs.keys()) + l = sset.symmetric_difference(oset) + for k in sset & oset: # over attrs in both dicts + if type(self.attrs[k]) == list and type(other.attrs[k]) == list: + if sorted(self.attrs[k]) != sorted(other.attrs[k]): + l.add(k) + elif self.attrs[k] != other.attrs[k]: + l.add(k) + return l + + def consolidate_attrs(self): + """Removes duplicate values from values which are lists.""" + for k in self.attrs: + if isinstance(self.attrs[k], list): + self.attrs[k] = list(set(self.attrs[k])) + + def generate_indices(self): + """Generate the information needed to index this action. + + This method, and the overriding methods in subclasses, produce + a list of four-tuples. The tuples are of the form + (action_name, key, token, full value). action_name is the + string representation of the kind of action generating the + tuple. 'file' and 'depend' are two examples. It is required to + not be None. Key is the string representation of the name of + the attribute being indexed. Examples include 'basename' and + 'path'. Token is the token to be searched against. 
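# Aside: when hashes are equal or not applicable, different() falls back to a
# plain attribute comparison. The standalone sketch below shows that fallback:
# symmetric difference of attribute names first, then value checks that treat
# list values as order-insensitive. Both attribute dicts are invented.
def attrs_differ(a, b):
    if set(a) ^ set(b):
        return True
    for k in a:
        x, y = a[k], b[k]
        if isinstance(x, list) and isinstance(y, list):
            if sorted(x) != sorted(y):
                return True
        elif x != y:
            return True
    return False

old = {"path": "usr/bin/example", "mode": "0755", "variant.arch": ["i386", "sparc"]}
new = {"path": "usr/bin/example", "mode": "0755", "variant.arch": ["sparc", "i386"]}
print(attrs_differ(old, new))   # False - list order alone is not a difference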
Full value + is the value to display to the user in the event this token + matches their query. This is useful for things like categories + where what matched the query may be a substring of what the + desired user output is. + """ - path = self.get_installed_path(pkgplan.image.get_root()) + # Indexing based on the SHA-1 hash is enough for the generic + # case. + if hasattr(self, "hash"): + return [ + (self.name, "content", self.hash, self.hash), + ] + return [] + + def get_installed_path(self, img_root): + """Given an image root, return the installed path of the action + if it has a installable payload (i.e. 'path' attribute).""" + try: + return os.path.normpath(os.path.join(img_root, self.attrs["path"])) + except KeyError: + return + + def distinguished_name(self): + """Return the distinguishing name for this action, + preceded by the type of the distinguishing name. For + example, for a file action, 'path' might be the + key_attr. So, the distinguished name might be + "path: usr/lib/libc.so.1". + """ - # The attribute may be missing. - owner = self.attrs.get("owner", "").rstrip() + if self.key_attr is None: + return str(self) + return "{0}: {1}".format( + self.name, self.attrs.get(self.key_attr, "???") + ) + + def makedirs(self, path, **kw): + """Make directory specified by 'path' with given permissions, as + well as all missing parent directories. Permissions are + specified by the keyword arguments 'mode', 'uid', and 'gid'. + + The difference between this and os.makedirs() is that the + permissions specify only those of the leaf directory. Missing + parent directories inherit the permissions of the deepest + existing directory. The leaf directory will also inherit any + permissions not explicitly set.""" + + # generate the components of the path. The first + # element will be empty since all absolute paths + # always start with a root specifier. + pathlist = portable.split_path(path) + + # Fill in the first path with the root of the filesystem + # (this ends up being something like C:\ on windows systems, + # and "/" on unix. + pathlist[0] = portable.get_root(path) + + g = enumerate(pathlist) + for i, e in g: + # os.path.isdir() follows links, which isn't + # desirable here. + p = os.path.join(*pathlist[: i + 1]) + try: + fs = os.lstat(p) + except OSError as e: + if e.errno == errno.ENOENT: + break + raise + + if not stat.S_ISDIR(fs.st_mode): + if p == path: + # Allow caller to handle target by + # letting the operation continue, + # and whatever error is encountered + # being raised to the caller. + break + + err_txt = _( + "Unable to create {path}; a " + "parent directory {p} has been replaced " + "with a file or link. Please restore the " + "parent directory and try again." + ).format(**locals()) + raise apx.ActionExecutionError( + self, details=err_txt, error=e, fmri=kw.get("fmri") + ) + else: + # XXX Because the filelist codepath may create + # directories with incorrect permissions (see + # pkgtarfile.py), we need to correct those permissions + # here. Note that this solution relies on all + # intermediate directories being explicitly created by + # the packaging system; otherwise intermediate + # directories will not get their permissions corrected. 
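# Aside: generate_indices() returns (action_name, key, token, full_value)
# tuples. The entries below are an invented example of what a file action for
# usr/bin/example might contribute; full_value is None when the token itself
# is what should be shown to the user.
example_indices = [
    ("file", "content", "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3",
     "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"),
    ("file", "basename", "example", None),
    ("file", "path", "/usr/bin/example", None),
]
for action_name, key, token, full_value in example_indices:
    print(action_name, key, token, full_value)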
+ fs = os.lstat(path) + mode = kw.get("mode", fs.st_mode) + uid = kw.get("uid", fs.st_uid) + gid = kw.get("gid", fs.st_gid) + try: + if mode != fs.st_mode: + os.chmod(path, mode) + if uid != fs.st_uid or gid != fs.st_gid: + portable.chown(path, uid, gid) + except OSError as e: + if e.errno != errno.EPERM and e.errno != errno.ENOSYS: + raise + return + + fs = os.stat(os.path.join(*pathlist[:i])) + for i, e in g: + p = os.path.join(*pathlist[:i]) + try: + os.mkdir(p, fs.st_mode) + except OSError as e: + if e.errno != errno.ENOTDIR: + raise + err_txt = _( + "Unable to create {path}; a " + "parent directory {p} has been replaced " + "with a file or link. Please restore the " + "parent directory and try again." + ).format(**locals()) + raise apx.ActionExecutionError( + self, details=err_txt, error=e, fmri=kw.get("fmri") + ) + + os.chmod(p, fs.st_mode) + try: + portable.chown(p, fs.st_uid, fs.st_gid) + except OSError as e: + if e.errno != errno.EPERM: + raise + + # Create the leaf with any requested permissions, substituting + # missing perms with the parent's perms. + mode = kw.get("mode", fs.st_mode) + uid = kw.get("uid", fs.st_uid) + gid = kw.get("gid", fs.st_gid) + os.mkdir(path, mode) + os.chmod(path, mode) + try: + portable.chown(path, uid, gid) + except OSError as e: + if e.errno != errno.EPERM: + raise + + def get_varcet_keys(self): + """Return the names of any facet or variant tags in this + action.""" + + # Hot path; grab reference to attrs and use list comprehensions + # to construct the results. This is faster than iterating over + # attrs once and appending to two lists separately. + attrs = self.attrs + return [k for k in attrs if k[:8] == "variant."], [ + k for k in attrs if k[:6] == "facet." + ] + + def get_variant_template(self): + """Return the VariantCombinationTemplate that the variant tags + of this action define.""" + + return variant.VariantCombinationTemplate( + dict(((v, self.attrs[v]) for v in self.get_varcet_keys()[0])) + ) + + def strip(self, preserve=EmptyDict): + """Strip actions of attributes which are unnecessary once + those actions have been installed in an image. Stripped + actions are saved in an images stripped action cache and used + for conflicting actions checks during image planning + operations.""" + + for key in list(self.attrs.keys()): + # strip out variant and facet information + if key[:8] == "variant." or key[:6] == "facet.": + del self.attrs[key] + continue + # keep unique attributes + if not self.unique_attrs or key in self.unique_attrs: + continue + # keep file action overlay attributes + if self.name == "file" and key == "overlay": + continue + # keep file action overlay-attributes attributes + if self.name == "file" and key == "overlay-attributes": + continue + # keep specified keys + if key in preserve.get(self.name, []): + continue + # keep link/hardlink action mediator attributes + if (self.name == "link" or self.name == "hardlink") and key[ + :8 + ] == "mediator": + continue + del self.attrs[key] + + def strip_variants(self): + """Remove all variant tags from the attrs dictionary.""" + + for k in list(self.attrs.keys()): + if k.startswith("variant."): + del self.attrs[k] + + def verify(self, img, **args): + """Returns a tuple of lists of the form (errors, warnings, + info). 
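# Aside: unlike os.makedirs(), the mode/uid/gid keywords of makedirs() apply
# only to the leaf directory, and newly created parents copy the mode of the
# deepest directory that already exists. The standalone demo below reproduces
# that split with plain os calls in a throwaway temporary tree.
import os
import stat
import tempfile

root = tempfile.mkdtemp()
parent = os.path.join(root, "opt")
os.mkdir(parent)
os.chmod(parent, 0o750)          # pre-existing parent with its own mode

inherited = stat.S_IMODE(os.lstat(parent).st_mode)
middle = os.path.join(parent, "example")
os.mkdir(middle)
os.chmod(middle, inherited)      # missing intermediate inherits 0750
leaf = os.path.join(middle, "lib")
os.mkdir(leaf)
os.chmod(leaf, 0o755)            # only the leaf gets the requested mode

print(oct(stat.S_IMODE(os.lstat(middle).st_mode)),
      oct(stat.S_IMODE(os.lstat(leaf).st_mode)))
# 0o750 0o755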
The error list will be empty if the action has been + correctly installed in the given image.""" + return [], [], [] + + def _validate_fsobj_common(self): + """Private, common validation logic for filesystem objects that + returns a list of tuples of the form (attr_name, error_message). + """ - # Now attempt to determine the uid and raise an appropriate - # exception if it can't be. + errors = [] + + bad_mode = False + raw_mode = self.attrs.get("mode") + if not raw_mode or isinstance(raw_mode, list): + bad_mode = True + else: + mlen = len(raw_mode) + # Common case for our packages is 4 so place that first. + if not (mlen == 4 or mlen == 3 or mlen == 5): + bad_mode = True + elif mlen == 5 and raw_mode[0] != "0": + bad_mode = True + + # The group, mode, and owner attributes are intentionally only + # required during publication as it is anticipated that the + # there will eventually be defaults for these (possibly parent + # directory, etc.). By only requiring these attributes here, + # it prevents publication of packages for which no default + # currently exists, while permitting future changes to remove + # that limitaiton and use sane defaults. + if not bad_mode: + try: + mode = str(int(raw_mode, 8)) + except (TypeError, ValueError): + bad_mode = True + else: + bad_mode = mode == "" + + if bad_mode: + if not raw_mode: + errors.append( + ( + "mode", + _( + "mode is required; " + "value must be of the form '644', " + "'0644', or '04755'." + ), + ) + ) + elif isinstance(raw_mode, list): + errors.append(("mode", _("mode may only be " "specified once"))) + else: + errors.append( + ( + "mode", + _( + "'{0}' is not a valid " + "mode; value must be of the form '644', " + "'0644', or '04755'." + ).format(raw_mode), + ) + ) + + try: + owner = self.attrs.get("owner", "").rstrip() + except AttributeError: + errors.append(("owner", _("owner may only be specified " "once"))) + + try: + group = self.attrs.get("group", "").rstrip() + except AttributeError: + errors.append(("group", _("group may only be specified " "once"))) + + return errors + + def get_fsobj_uid_gid(self, pkgplan, fmri): + """Returns a tuple of the form (owner, group) containing the uid + and gid of the filesystem object. If the attributes are missing + or invalid, an InvalidActionAttributesError exception will be + raised.""" + + path = self.get_installed_path(pkgplan.image.get_root()) + + # The attribute may be missing. + owner = self.attrs.get("owner", "").rstrip() + + # Now attempt to determine the uid and raise an appropriate + # exception if it can't be. + try: + owner = pkgplan.image.get_user_by_name(owner) + except KeyError: + if not owner: + # Owner was missing; let validate raise a more + # informative error. + self.validate(fmri=fmri) + + # Otherwise, the user is unknown; attempt to report why. + pd = pkgplan.image.imageplan.pd + if owner in pd.removed_users: + # What package owned the user that was removed? + src_fmri = pd.removed_users[owner] + + raise pkg.actions.InvalidActionAttributesError( + self, + [ + ( + "owner", + _( + "'{path}' cannot be " + "installed; the owner '{owner}' was " + "removed by '{src_fmri}'." + ).format(path=path, owner=owner, src_fmri=src_fmri), + ) + ], + fmri=fmri, + ) + elif owner in pd.added_users: + # This indicates an error on the part of the + # caller; the user should have been added + # before attempting to install the file. + raise + + # If this spot was reached, the user wasn't part of + # the operation plan and is completely unknown or + # invalid. 
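# Aside: a standalone restatement of the mode checks in _validate_fsobj_common:
# the string form must be 3, 4 or 5 characters, a 5-character mode must start
# with "0", and the value must parse as octal. The sample values are invented.
def mode_ok(raw):
    if not raw or not isinstance(raw, str):
        return False
    if len(raw) not in (3, 4, 5):
        return False
    if len(raw) == 5 and raw[0] != "0":
        return False
    try:
        int(raw, 8)
    except ValueError:
        return False
    return True

for m in ("644", "0644", "04755", "0999", "75"):
    print(m, mode_ok(m))
# 644 True / 0644 True / 04755 True / 0999 False / 75 False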
+ raise pkg.actions.InvalidActionAttributesError( + self, + [ + ( + "owner", + _( + "'{path}' cannot be " + "installed; '{owner}' is an unknown " + "or invalid user." + ).format(path=path, owner=owner), + ) + ], + fmri=fmri, + ) + + # The attribute may be missing. + group = self.attrs.get("group", "").rstrip() + + # Now attempt to determine the gid and raise an appropriate + # exception if it can't be. + try: + group = pkgplan.image.get_group_by_name(group) + except KeyError: + if not group: + # Group was missing; let validate raise a more + # informative error. + self.validate(fmri=pkgplan.destination_fmri) + + # Otherwise, the group is unknown; attempt to report + # why. + pd = pkgplan.image.imageplan.pd + if group in pd.removed_groups: + # What package owned the group that was removed? + src_fmri = pd.removed_groups[group] + + raise pkg.actions.InvalidActionAttributesError( + self, + [ + ( + "group", + _( + "'{path}' cannot be " + "installed; the group '{group}' was " + "removed by '{src_fmri}'." + ).format(path=path, group=group, src_fmri=src_fmri), + ) + ], + fmri=pkgplan.destination_fmri, + ) + elif group in pd.added_groups: + # This indicates an error on the part of the + # caller; the group should have been added + # before attempting to install the file. + raise + + # If this spot was reached, the group wasn't part of + # the operation plan and is completely unknown or + # invalid. + raise pkg.actions.InvalidActionAttributesError( + self, + [ + ( + "group", + _( + "'{path}' cannot be " + "installed; '{group}' is an unknown " + "or invalid group." + ).format(path=path, group=group), + ) + ], + fmri=pkgplan.destination_fmri, + ) + + return owner, group + + def verify_fsobj_common(self, img, ftype): + """Common verify logic for filesystem objects.""" + + errors = [] + warnings = [] + info = [] + + abort = False + + def ftype_to_name(ftype): + assert ftype is not None + tmap = { + stat.S_IFIFO: "fifo", + stat.S_IFCHR: "character device", + stat.S_IFDIR: "directory", + stat.S_IFBLK: "block device", + stat.S_IFREG: "regular file", + stat.S_IFLNK: "symbolic link", + stat.S_IFSOCK: "socket", + } + if ftype in tmap: + return tmap[ftype] + else: + return "Unknown (0x{0:x})".format(ftype) + + mode = owner = group = None + if ftype != stat.S_IFLNK: + if "mode" in self.attrs: + mode = int(self.attrs["mode"], 8) + if "owner" in self.attrs: + owner = self.attrs["owner"] try: - owner = pkgplan.image.get_user_by_name(owner) + owner = img.get_user_by_name(owner) except KeyError: - if not owner: - # Owner was missing; let validate raise a more - # informative error. - self.validate(fmri=fmri) - - # Otherwise, the user is unknown; attempt to report why. - pd = pkgplan.image.imageplan.pd - if owner in pd.removed_users: - # What package owned the user that was removed? - src_fmri = pd.removed_users[owner] - - raise pkg.actions.InvalidActionAttributesError( - self, [("owner", _("'{path}' cannot be " - "installed; the owner '{owner}' was " - "removed by '{src_fmri}'.").format( - path=path, owner=owner, - src_fmri=src_fmri))], - fmri=fmri) - elif owner in pd.added_users: - # This indicates an error on the part of the - # caller; the user should have been added - # before attempting to install the file. - raise - - # If this spot was reached, the user wasn't part of - # the operation plan and is completely unknown or - # invalid. 
- raise pkg.actions.InvalidActionAttributesError( - self, [("owner", _("'{path}' cannot be " - "installed; '{owner}' is an unknown " - "or invalid user.").format(path=path, - owner=owner))], - fmri=fmri) - - # The attribute may be missing. - group = self.attrs.get("group", "").rstrip() - - # Now attempt to determine the gid and raise an appropriate - # exception if it can't be. + errors.append(_("owner: {0} is unknown").format(owner)) + owner = None + if "group" in self.attrs: + group = self.attrs["group"] try: - group = pkgplan.image.get_group_by_name(group) + group = img.get_group_by_name(group) except KeyError: - if not group: - # Group was missing; let validate raise a more - # informative error. - self.validate(fmri=pkgplan.destination_fmri) - - # Otherwise, the group is unknown; attempt to report - # why. - pd = pkgplan.image.imageplan.pd - if group in pd.removed_groups: - # What package owned the group that was removed? - src_fmri = pd.removed_groups[group] - - raise pkg.actions.InvalidActionAttributesError( - self, [("group", _("'{path}' cannot be " - "installed; the group '{group}' was " - "removed by '{src_fmri}'.").format( - path=path, group=group, - src_fmri=src_fmri))], - fmri=pkgplan.destination_fmri) - elif group in pd.added_groups: - # This indicates an error on the part of the - # caller; the group should have been added - # before attempting to install the file. - raise - - # If this spot was reached, the group wasn't part of - # the operation plan and is completely unknown or - # invalid. - raise pkg.actions.InvalidActionAttributesError( - self, [("group", _("'{path}' cannot be " - "installed; '{group}' is an unknown " - "or invalid group.").format(path=path, - group=group))], - fmri=pkgplan.destination_fmri) - - return owner, group - - def verify_fsobj_common(self, img, ftype): - """Common verify logic for filesystem objects.""" - - errors = [] - warnings = [] - info = [] - - abort = False - def ftype_to_name(ftype): - assert ftype is not None - tmap = { - stat.S_IFIFO: "fifo", - stat.S_IFCHR: "character device", - stat.S_IFDIR: "directory", - stat.S_IFBLK: "block device", - stat.S_IFREG: "regular file", - stat.S_IFLNK: "symbolic link", - stat.S_IFSOCK: "socket", - } - if ftype in tmap: - return tmap[ftype] - else: - return "Unknown (0x{0:x})".format(ftype) - - mode = owner = group = None - if ftype != stat.S_IFLNK: - if "mode" in self.attrs: - mode = int(self.attrs["mode"], 8) - if "owner" in self.attrs: - owner = self.attrs["owner"] - try: - owner = img.get_user_by_name(owner) - except KeyError: - errors.append( - _("owner: {0} is unknown").format( - owner)) - owner = None - if "group" in self.attrs: - group = self.attrs["group"] - try: - group = img.get_group_by_name(group) - except KeyError: - errors.append( - _("group: {0} is unknown ").format( - group)) - group = None - - path = self.get_installed_path(img.get_root()) - - lstat = None - try: - lstat = os.lstat(path) - except OSError as e: - if e.errno == errno.ENOENT: - if self.attrs.get("preserve", "") == "legacy": - # It's acceptable for files with - # preserve=legacy to be missing; - # nothing more to validate. 
- return (lstat, errors, warnings, info, - abort) - errors.append( - _("missing: {0} does not exist").format( - ftype_to_name(ftype))) - elif e.errno == errno.EACCES: - errors.append(_("skipping: permission denied")) - else: - errors.append( - _("unexpected error: {0}").format(e)) - abort = True - - if abort: - return lstat, errors, warnings, info, abort - - if ftype is not None and ftype != stat.S_IFMT(lstat.st_mode): - errors.append(_("file type: '{found}' should be " - "'{expected}'").format( - found=ftype_to_name(stat.S_IFMT(lstat.st_mode)), - expected=ftype_to_name(ftype))) - abort = True - - if owner is not None and lstat.st_uid != owner: - errors.append(_("owner: '{found_name} " - "({found_id:d})' should be '{expected_name} " - "({expected_id:d})'").format( - found_name=img.get_name_by_uid(lstat.st_uid, - True), found_id=lstat.st_uid, - expected_name=self.attrs["owner"], - expected_id=owner)) - - if group is not None and lstat.st_gid != group: - errors.append(_("group: '{found_name} " - "({found_id})' should be '{expected_name} " - "({expected_id})'").format( - found_name=img.get_name_by_gid(lstat.st_gid, - True), found_id=lstat.st_gid, - expected_name=self.attrs["group"], - expected_id=group)) - - if mode is not None and stat.S_IMODE(lstat.st_mode) != mode: - errors.append(_("mode: {found:04o} should be " - "{expected:04o}").format( - found=stat.S_IMODE(lstat.st_mode), - expected=mode)) - return lstat, errors, warnings, info, abort - - def needsdata(self, orig, pkgplan): - """Returns True if the action transition requires a - datastream.""" + errors.append(_("group: {0} is unknown ").format(group)) + group = None + + path = self.get_installed_path(img.get_root()) + + lstat = None + try: + lstat = os.lstat(path) + except OSError as e: + if e.errno == errno.ENOENT: + if self.attrs.get("preserve", "") == "legacy": + # It's acceptable for files with + # preserve=legacy to be missing; + # nothing more to validate. 
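+                    # (note that the returned lstat is still None in
+                    # this case)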
+ return (lstat, errors, warnings, info, abort) + errors.append( + _("missing: {0} does not exist").format( + ftype_to_name(ftype) + ) + ) + elif e.errno == errno.EACCES: + errors.append(_("skipping: permission denied")) + else: + errors.append(_("unexpected error: {0}").format(e)) + abort = True + + if abort: + return lstat, errors, warnings, info, abort + + if ftype is not None and ftype != stat.S_IFMT(lstat.st_mode): + errors.append( + _("file type: '{found}' should be " "'{expected}'").format( + found=ftype_to_name(stat.S_IFMT(lstat.st_mode)), + expected=ftype_to_name(ftype), + ) + ) + abort = True + + if owner is not None and lstat.st_uid != owner: + errors.append( + _( + "owner: '{found_name} " + "({found_id:d})' should be '{expected_name} " + "({expected_id:d})'" + ).format( + found_name=img.get_name_by_uid(lstat.st_uid, True), + found_id=lstat.st_uid, + expected_name=self.attrs["owner"], + expected_id=owner, + ) + ) + + if group is not None and lstat.st_gid != group: + errors.append( + _( + "group: '{found_name} " + "({found_id})' should be '{expected_name} " + "({expected_id})'" + ).format( + found_name=img.get_name_by_gid(lstat.st_gid, True), + found_id=lstat.st_gid, + expected_name=self.attrs["group"], + expected_id=group, + ) + ) + + if mode is not None and stat.S_IMODE(lstat.st_mode) != mode: + errors.append( + _("mode: {found:04o} should be " "{expected:04o}").format( + found=stat.S_IMODE(lstat.st_mode), expected=mode + ) + ) + return lstat, errors, warnings, info, abort + + def needsdata(self, orig, pkgplan): + """Returns True if the action transition requires a + datastream.""" + return False + + def get_size(self): + return int(self.attrs.get("pkg.size", "0")) + + def attrlist(self, name): + """return list containing value of named attribute.""" + try: + value = self.attrs[name] + except KeyError: + return [] + if type(value) is not list: + return [value] + return value + + def directory_references(self): + """Returns references to paths in action.""" + if "path" in self.attrs: + return [os.path.dirname(os.path.normpath(self.attrs["path"]))] + return [] + + def preinstall(self, pkgplan, orig): + """Client-side method that performs pre-install actions.""" + pass + + def install(self, pkgplan, orig): + """Client-side method that installs the object.""" + pass + + def postinstall(self, pkgplan, orig): + """Client-side method that performs post-install actions.""" + pass + + def preremove(self, pkgplan): + """Client-side method that performs pre-remove actions.""" + pass + + def remove(self, pkgplan): + """Client-side method that removes the object.""" + pass + + def remove_fsobj(self, pkgplan, path): + """Shared logic for removing file and link objects.""" + + # Necessary since removal logic is reused by install. + fmri = pkgplan.destination_fmri + if not fmri: + fmri = pkgplan.origin_fmri + + try: + portable.remove(path) + except EnvironmentError as e: + if e.errno == errno.ENOENT: + # Already gone; don't care. + return + elif e.errno == errno.EBUSY and os.path.ismount(path): + # User has replaced item with mountpoint, or a + # package has been poorly implemented. + err_txt = _( + "Unable to remove {0}; it is in use " + "as a mountpoint. To continue, please " + "unmount the filesystem at the target " + "location and try again." + ).format(path) + raise apx.ActionExecutionError( + self, details=err_txt, error=e, fmri=fmri + ) + elif e.errno == errno.EBUSY: + # os.path.ismount() is broken for lofs + # filesystems, so give a more generic + # error. 
+ err_txt = _( + "Unable to remove {0}; it is in " + "use by the system, another process, or " + "as a mountpoint." + ).format(path) + raise apx.ActionExecutionError( + self, details=err_txt, error=e, fmri=fmri + ) + elif e.errno == errno.EPERM and not stat.S_ISDIR( + os.lstat(path).st_mode + ): + # Was expecting a directory in this failure + # case, it is not, so raise the error. + raise + elif e.errno in (errno.EACCES, errno.EROFS): + # Raise these permissions exceptions as-is. + raise + elif e.errno != errno.EPERM: + # An unexpected error. + raise apx.ActionExecutionError(self, error=e, fmri=fmri) + + # Attempting to remove a directory as performed above + # gives EPERM. First, try to remove the directory, + # if it isn't empty, salvage it. + try: + os.rmdir(path) + except OSError as e: + if e.errno in (errno.EPERM, errno.EACCES): + # Raise permissions exceptions as-is. + raise + elif e.errno not in (errno.EEXIST, errno.ENOTEMPTY): + # An unexpected error. + raise apx.ActionExecutionError(self, error=e, fmri=fmri) + + pkgplan.salvage(path) + + def postremove(self, pkgplan): + """Client-side method that performs post-remove actions.""" + pass + + def include_this(self, excludes, publisher=None): + """Callables in excludes list returns True + if action is to be included, False if + not""" + for c in excludes: + if not c(self, publisher=publisher): return False + return True + + def validate(self, fmri=None): + """Performs additional validation of action attributes that + for performance or other reasons cannot or should not be done + during Action object creation. An ActionError exception (or + subclass of) will be raised if any attributes are not valid. + This is primarily intended for use during publication or during + error handling to provide additional diagonostics. + + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action. + """ - def get_size(self): - return int(self.attrs.get("pkg.size", "0")) + self._validate(fmri=fmri) - def attrlist(self, name): - """return list containing value of named attribute.""" - try: - value = self.attrs[name] - except KeyError: - return [] - if type(value) is not list: - return [value] - return value - - def directory_references(self): - """Returns references to paths in action.""" - if "path" in self.attrs: - return [os.path.dirname(os.path.normpath( - self.attrs["path"]))] - return [] - - def preinstall(self, pkgplan, orig): - """Client-side method that performs pre-install actions.""" - pass - - def install(self, pkgplan, orig): - """Client-side method that installs the object.""" - pass - - def postinstall(self, pkgplan, orig): - """Client-side method that performs post-install actions.""" - pass - - def preremove(self, pkgplan): - """Client-side method that performs pre-remove actions.""" - pass - - def remove(self, pkgplan): - """Client-side method that removes the object.""" - pass - - def remove_fsobj(self, pkgplan, path): - """Shared logic for removing file and link objects.""" - - # Necessary since removal logic is reused by install. - fmri = pkgplan.destination_fmri - if not fmri: - fmri = pkgplan.origin_fmri + def _validate( + self, + fmri=None, + numeric_attrs=EmptyI, + raise_errors=True, + required_attrs=EmptyI, + single_attrs=EmptyI, + ): + """Common validation logic for all action types. + + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action. + 'numeric_attrs' is a list of attributes that must have an + integer value. 
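+
+        'required_attrs' is a list of attributes that must be present
+        and have a non-empty value.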
+ + 'raise_errors' is a boolean indicating whether errors should be + raised as an exception or returned as a list of tuples of the + form (attr_name, error_message). + + 'single_attrs' is a list of attributes that should only be + specified once. + """ + + errors = [] + for attr in self.attrs: + if ( + attr.startswith("facet.") + or attr == "reboot-needed" + or attr in single_attrs + ) and type(self.attrs[attr]) is list: + errors.append( + (attr, _("{0} may only be " "specified once").format(attr)) + ) + elif attr in numeric_attrs: try: - portable.remove(path) - except EnvironmentError as e: - if e.errno == errno.ENOENT: - # Already gone; don't care. - return - elif e.errno == errno.EBUSY and os.path.ismount(path): - # User has replaced item with mountpoint, or a - # package has been poorly implemented. - err_txt = _("Unable to remove {0}; it is in use " - "as a mountpoint. To continue, please " - "unmount the filesystem at the target " - "location and try again.").format(path) - raise apx.ActionExecutionError(self, - details=err_txt, error=e, fmri=fmri) - elif e.errno == errno.EBUSY: - # os.path.ismount() is broken for lofs - # filesystems, so give a more generic - # error. - err_txt = _("Unable to remove {0}; it is in " - "use by the system, another process, or " - "as a mountpoint.").format(path) - raise apx.ActionExecutionError(self, - details=err_txt, error=e, fmri=fmri) - elif e.errno == errno.EPERM and \ - not stat.S_ISDIR(os.lstat(path).st_mode): - # Was expecting a directory in this failure - # case, it is not, so raise the error. - raise - elif e.errno in (errno.EACCES, errno.EROFS): - # Raise these permissions exceptions as-is. - raise - elif e.errno != errno.EPERM: - # An unexpected error. - raise apx.ActionExecutionError(self, error=e, - fmri=fmri) - - # Attempting to remove a directory as performed above - # gives EPERM. First, try to remove the directory, - # if it isn't empty, salvage it. - try: - os.rmdir(path) - except OSError as e: - if e.errno in (errno.EPERM, errno.EACCES): - # Raise permissions exceptions as-is. - raise - elif e.errno not in (errno.EEXIST, - errno.ENOTEMPTY): - # An unexpected error. - raise apx.ActionExecutionError(self, - error=e, fmri=fmri) - - pkgplan.salvage(path) - - def postremove(self, pkgplan): - """Client-side method that performs post-remove actions.""" - pass - - def include_this(self, excludes, publisher=None): - """Callables in excludes list returns True - if action is to be included, False if - not""" - for c in excludes: - if not c(self, publisher=publisher): - return False - return True + int(self.attrs[attr]) + except (TypeError, ValueError): + errors.append( + (attr, _("{0} must be an " "integer").format(attr)) + ) + + for attr in required_attrs: + val = self.attrs.get(attr) + if not val or ( + isinstance(val, six.string_types) and not val.strip() + ): + errors.append((attr, _("{0} is required").format(attr))) + + if raise_errors and errors: + raise pkg.actions.InvalidActionAttributesError( + self, errors, fmri=fmri + ) + return errors + + def fsobj_checkpath(self, pkgplan, final_path): + """Verifies that the specified path doesn't contain one or more + symlinks relative to the image root. 
Raises an + ActionExecutionError exception if path check fails.""" + + valid_dirs = pkgplan.image.imageplan.valid_directories + parent_path = os.path.dirname(final_path) + if parent_path in valid_dirs: + return + + real_parent_path = os.path.realpath(parent_path) + if parent_path == real_parent_path: + valid_dirs.add(parent_path) + return + + fmri = pkgplan.destination_fmri + + # Now test each component of the parent path until one is found + # to be a link. When found, that's the parent that has been + # redirected to some other location. + tmp = parent_path + img_root = pkgplan.image.root.rstrip(os.path.sep) + while 1: + if tmp == img_root: + # No parent directories up to the root were + # found to be links, so assume this is ok. + valid_dirs.add(parent_path) + return + + if os.path.islink(tmp): + # We've found the parent that changed locations. + break + # Drop the final component. + tmp = os.path.split(tmp)[0] + + parent_dir = tmp + parent_target = os.path.realpath(parent_dir) + err_txt = _( + "Cannot install '{final_path}'; parent directory " + "{parent_dir} is a link to {parent_target}. To " + "continue, move the directory to its original location and " + "try again." + ).format(**locals()) + raise apx.ActionExecutionError(self, details=err_txt, fmri=fmri) + + if six.PY3: + + def __init__(self, data=None, **attrs): + # create a bound method (no unbound method in Python 3) + _common._generic_init(self, data, **attrs) - def validate(self, fmri=None): - """Performs additional validation of action attributes that - for performance or other reasons cannot or should not be done - during Action object creation. An ActionError exception (or - subclass of) will be raised if any attributes are not valid. - This is primarily intended for use during publication or during - error handling to provide additional diagonostics. - - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action. - """ - - self._validate(fmri=fmri) - - def _validate(self, fmri=None, numeric_attrs=EmptyI, - raise_errors=True, required_attrs=EmptyI, single_attrs=EmptyI): - """Common validation logic for all action types. - - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action. - - 'numeric_attrs' is a list of attributes that must have an - integer value. - - 'raise_errors' is a boolean indicating whether errors should be - raised as an exception or returned as a list of tuples of the - form (attr_name, error_message). - - 'single_attrs' is a list of attributes that should only be - specified once. - """ - - errors = [] - for attr in self.attrs: - if ((attr.startswith("facet.") or - attr == "reboot-needed" or attr in single_attrs) and - type(self.attrs[attr]) is list): - errors.append((attr, _("{0} may only be " - "specified once").format(attr))) - elif attr in numeric_attrs: - try: - int(self.attrs[attr]) - except (TypeError, ValueError): - errors.append((attr, _("{0} must be an " - "integer").format(attr))) - - for attr in required_attrs: - val = self.attrs.get(attr) - if not val or \ - (isinstance(val, six.string_types) and not val.strip()): - errors.append((attr, - _("{0} is required").format(attr))) - - if raise_errors and errors: - raise pkg.actions.InvalidActionAttributesError(self, - errors, fmri=fmri) - return errors - - def fsobj_checkpath(self, pkgplan, final_path): - """Verifies that the specified path doesn't contain one or more - symlinks relative to the image root. 
Raises an - ActionExecutionError exception if path check fails.""" - - valid_dirs = pkgplan.image.imageplan.valid_directories - parent_path = os.path.dirname(final_path) - if parent_path in valid_dirs: - return - - real_parent_path = os.path.realpath(parent_path) - if parent_path == real_parent_path: - valid_dirs.add(parent_path) - return - - fmri = pkgplan.destination_fmri - - # Now test each component of the parent path until one is found - # to be a link. When found, that's the parent that has been - # redirected to some other location. - tmp = parent_path - img_root = pkgplan.image.root.rstrip(os.path.sep) - while 1: - if tmp == img_root: - # No parent directories up to the root were - # found to be links, so assume this is ok. - valid_dirs.add(parent_path) - return - - if os.path.islink(tmp): - # We've found the parent that changed locations. - break - # Drop the final component. - tmp = os.path.split(tmp)[0] - - parent_dir = tmp - parent_target = os.path.realpath(parent_dir) - err_txt = _("Cannot install '{final_path}'; parent directory " - "{parent_dir} is a link to {parent_target}. To " - "continue, move the directory to its original location and " - "try again.").format(**locals()) - raise apx.ActionExecutionError(self, details=err_txt, - fmri=fmri) - - if six.PY3: - def __init__(self, data=None, **attrs): - # create a bound method (no unbound method in Python 3) - _common._generic_init(self, data, **attrs) if six.PY2: - # create an unbound method - Action.__init__ = types.MethodType(_common._generic_init, None, Action) + # create an unbound method + Action.__init__ = types.MethodType(_common._generic_init, None, Action) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/group.py b/src/modules/actions/group.py index b7ab123ea..ef93759d5 100644 --- a/src/modules/actions/group.py +++ b/src/modules/actions/group.py @@ -31,229 +31,236 @@ a new user.""" from . import generic + try: - from pkg.cfgfiles import * - have_cfgfiles = True + from pkg.cfgfiles import * + + have_cfgfiles = True except ImportError: - have_cfgfiles = False + have_cfgfiles = False import pkg.client.api_errors as apx import pkg.actions + class GroupAction(generic.Action): - """Class representing a group packaging object. - note that grouplist members are selected via the user action, - although they are stored in the /etc/group file. Use of - group passwds is not supported.""" - - __slots__ = [] - - name = "group" - key_attr = "groupname" - globally_identical = True - ordinality = generic._orderdict[name] - - def extract(self, attrlist): - """ return a dictionary containing attrs in attr list - from self.attrs; omit if no such attrs in self.attrs""" - return dict((a, self.attrs[a]) - for a in self.attrs - if a in attrlist) - - def install(self, pkgplan, orig, retry=False): - """client-side method that adds the group - use gid from disk if different""" - if not have_cfgfiles: - # the group action is ignored if cfgfiles is not - # available. - return - - template = self.extract(["groupname", "gid"]) - - root = pkgplan.image.get_root() + """Class representing a group packaging object. + note that grouplist members are selected via the user action, + although they are stored in the /etc/group file. 
Use of + group passwds is not supported.""" + + __slots__ = [] + + name = "group" + key_attr = "groupname" + globally_identical = True + ordinality = generic._orderdict[name] + + def extract(self, attrlist): + """return a dictionary containing attrs in attr list + from self.attrs; omit if no such attrs in self.attrs""" + return dict((a, self.attrs[a]) for a in self.attrs if a in attrlist) + + def install(self, pkgplan, orig, retry=False): + """client-side method that adds the group + use gid from disk if different""" + if not have_cfgfiles: + # the group action is ignored if cfgfiles is not + # available. + return + + template = self.extract(["groupname", "gid"]) + + root = pkgplan.image.get_root() + try: + pw = PasswordFile(root, lock=True) + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + pw = None + + gr = GroupFile(pkgplan.image) + + cur_attrs = gr.getvalue(template) + + # check for (wrong) pre-existing definition + # if so, rewrite entry using existing defs but new group entry + # (XXX this doesn't chown any files on-disk) + # else, nothing to do + if cur_attrs: + if "gid" not in self.attrs: + self.attrs["gid"] = cur_attrs["gid"] + elif self.attrs["gid"] != cur_attrs["gid"]: + cur_gid = cur_attrs["gid"] + template = cur_attrs + template["gid"] = self.attrs["gid"] + # Update the user database with the new gid + # as well in case group is someone's primary + # group. + usernames = pkgplan.image.get_usernames_by_gid(cur_gid) try: - pw = PasswordFile(root, lock=True) - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - pw = None - - gr = GroupFile(pkgplan.image) - - cur_attrs = gr.getvalue(template) - - # check for (wrong) pre-existing definition - # if so, rewrite entry using existing defs but new group entry - # (XXX this doesn't chown any files on-disk) - # else, nothing to do - if cur_attrs: - if "gid" not in self.attrs: - self.attrs["gid"] = cur_attrs["gid"] - elif self.attrs["gid"] != cur_attrs["gid"]: - cur_gid = cur_attrs["gid"] - template = cur_attrs; - template["gid"] = self.attrs["gid"] - # Update the user database with the new gid - # as well in case group is someone's primary - # group. - usernames = pkgplan.image.get_usernames_by_gid( - cur_gid) - try: - for username in usernames: - user_entry = pw.getuser( - username) - user_entry["gid"] = self.attrs[ - "gid"] - pw.setvalue(user_entry) - except Exception as e: - if pw: - pw.unlock() - txt = _("Group cannot be installed. " - "Updating related user entries " - "failed.") - raise apx.ActionExecutionError(self, - error=e, details=txt, - fmri=pkgplan.destination_fmri) - - # Deal with other columns in the group row. - # - # pkg has no support for the legacy password field - # in the group table and thus requires it to be - # empty for any pkg delivered group. So there is - # explicitly no support for updating - # template["password"] since we require it to be empty. - # - # If the admin has assigned any users to a group that is - # delivered as an action we preserve that list without - # attempting to validate it in any way. - if cur_attrs["user-list"]: - template["user-list"] = cur_attrs["user-list"] - - gr.setvalue(template) - try: - gr.writefile() - if pw: - pw.writefile() - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - # If we're in the postinstall phase and the - # files *still* aren't there, bail gracefully. 
- if retry: - txt = _("Group cannot be installed " - "without group database files " - "present.") - raise apx.ActionExecutionError(self, error=e, - details=txt, fmri=pkgplan.destination_fmri) - img = pkgplan.image - img._groups.add(self) - if "gid" in self.attrs: - img._groupsbyname[self.attrs["groupname"]] = \ - int(self.attrs["gid"]) - raise pkg.actions.ActionRetry(self) - finally: - if pw: - pw.unlock() - - def retry(self, pkgplan, orig): - groups = pkgplan.image._groups - if groups: - assert self in groups - self.install(pkgplan, orig, retry=True) - - def verify(self, img, **args): - """Returns a tuple of lists of the form (errors, warnings, - info). The error list will be empty if the action has been - correctly installed in the given image.""" - - errors = [] - warnings = [] - info = [] - if not have_cfgfiles: - # The user action is ignored if cfgfiles is not - # available. - return errors, warnings, info - - gr = GroupFile(img) - - cur_attrs = gr.getvalue(self.attrs) - - # Get the default values if they're non-empty - grdefval = dict(( - (k, v) - for k, v in six.iteritems(gr.getdefaultvalues()) - if v != "" - )) - - # If "gid" is set dynamically, ignore what's on disk. - if "gid" not in self.attrs: - cur_attrs["gid"] = "" - - should_be = grdefval.copy() - should_be.update(self.attrs) - # Note where attributes are missing - for k in should_be: - cur_attrs.setdefault(k, "") - # Note where attributes should be empty - for k in cur_attrs: - if cur_attrs[k]: - should_be.setdefault(k, "") - # Ignore "user-list", as it is only modified by user actions - should_be.pop("user-list", None) - - errors = [ - _("{entry}: '{found}' should be '{expected}'").format( - entry=a, found=cur_attrs[a], - expected=should_be[a]) - for a in should_be - if cur_attrs[a] != should_be[a] - ] - return errors, warnings, info - - def remove(self, pkgplan): - """client-side method that removes this group""" - if not have_cfgfiles: - # The user action is ignored if cfgfiles is not - # available. - return - gr = GroupFile(pkgplan.image) - try: - gr.removevalue(self.attrs) - except KeyError as e: - # Already gone; don't care. - pass - else: - gr.writefile() - - def generate_indices(self): - """Generates the indices needed by the search dictionary. See - generic.py for a more detailed explanation.""" - - return [("group", "name", self.attrs["groupname"], None)] - - def validate(self, fmri=None): - """Performs additional validation of action attributes that - for performance or other reasons cannot or should not be done - during Action object creation. An ActionError exception (or - subclass of) will be raised if any attributes are not valid. - This is primarily intended for use during publication or during - error handling to provide additional diagonostics. - - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action. - """ - - generic.Action._validate(self, fmri=fmri, - numeric_attrs=("gid",), single_attrs=("gid",)) - - def compare(self, other): - """Arrange for group actions to be installed in gid order. 
This - will only hold true for actions installed at one time, but that's - generally what we need on initial install.""" - # put unspecifed gids at the end - a = int(self.attrs.get("gid", 1024)) - b = int(other.attrs.get("gid", 1024)) - return (a > b) - (a < b) + for username in usernames: + user_entry = pw.getuser(username) + user_entry["gid"] = self.attrs["gid"] + pw.setvalue(user_entry) + except Exception as e: + if pw: + pw.unlock() + txt = _( + "Group cannot be installed. " + "Updating related user entries " + "failed." + ) + raise apx.ActionExecutionError( + self, + error=e, + details=txt, + fmri=pkgplan.destination_fmri, + ) + + # Deal with other columns in the group row. + # + # pkg has no support for the legacy password field + # in the group table and thus requires it to be + # empty for any pkg delivered group. So there is + # explicitly no support for updating + # template["password"] since we require it to be empty. + # + # If the admin has assigned any users to a group that is + # delivered as an action we preserve that list without + # attempting to validate it in any way. + if cur_attrs["user-list"]: + template["user-list"] = cur_attrs["user-list"] + + gr.setvalue(template) + try: + gr.writefile() + if pw: + pw.writefile() + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + # If we're in the postinstall phase and the + # files *still* aren't there, bail gracefully. + if retry: + txt = _( + "Group cannot be installed " + "without group database files " + "present." + ) + raise apx.ActionExecutionError( + self, error=e, details=txt, fmri=pkgplan.destination_fmri + ) + img = pkgplan.image + img._groups.add(self) + if "gid" in self.attrs: + img._groupsbyname[self.attrs["groupname"]] = int( + self.attrs["gid"] + ) + raise pkg.actions.ActionRetry(self) + finally: + if pw: + pw.unlock() + + def retry(self, pkgplan, orig): + groups = pkgplan.image._groups + if groups: + assert self in groups + self.install(pkgplan, orig, retry=True) + + def verify(self, img, **args): + """Returns a tuple of lists of the form (errors, warnings, + info). The error list will be empty if the action has been + correctly installed in the given image.""" + + errors = [] + warnings = [] + info = [] + if not have_cfgfiles: + # The user action is ignored if cfgfiles is not + # available. + return errors, warnings, info + + gr = GroupFile(img) + + cur_attrs = gr.getvalue(self.attrs) + + # Get the default values if they're non-empty + grdefval = dict( + ((k, v) for k, v in six.iteritems(gr.getdefaultvalues()) if v != "") + ) + + # If "gid" is set dynamically, ignore what's on disk. + if "gid" not in self.attrs: + cur_attrs["gid"] = "" + + should_be = grdefval.copy() + should_be.update(self.attrs) + # Note where attributes are missing + for k in should_be: + cur_attrs.setdefault(k, "") + # Note where attributes should be empty + for k in cur_attrs: + if cur_attrs[k]: + should_be.setdefault(k, "") + # Ignore "user-list", as it is only modified by user actions + should_be.pop("user-list", None) + + errors = [ + _("{entry}: '{found}' should be '{expected}'").format( + entry=a, found=cur_attrs[a], expected=should_be[a] + ) + for a in should_be + if cur_attrs[a] != should_be[a] + ] + return errors, warnings, info + + def remove(self, pkgplan): + """client-side method that removes this group""" + if not have_cfgfiles: + # The user action is ignored if cfgfiles is not + # available. 
+ return + gr = GroupFile(pkgplan.image) + try: + gr.removevalue(self.attrs) + except KeyError as e: + # Already gone; don't care. + pass + else: + gr.writefile() + + def generate_indices(self): + """Generates the indices needed by the search dictionary. See + generic.py for a more detailed explanation.""" + + return [("group", "name", self.attrs["groupname"], None)] + + def validate(self, fmri=None): + """Performs additional validation of action attributes that + for performance or other reasons cannot or should not be done + during Action object creation. An ActionError exception (or + subclass of) will be raised if any attributes are not valid. + This is primarily intended for use during publication or during + error handling to provide additional diagonostics. + + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action. + """ + + generic.Action._validate( + self, fmri=fmri, numeric_attrs=("gid",), single_attrs=("gid",) + ) + + def compare(self, other): + """Arrange for group actions to be installed in gid order. This + will only hold true for actions installed at one time, but that's + generally what we need on initial install.""" + # put unspecifed gids at the end + a = int(self.attrs.get("gid", 1024)) + b = int(other.attrs.get("gid", 1024)) + return (a > b) - (a < b) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/hardlink.py b/src/modules/actions/hardlink.py index 855f5e457..878159c0e 100644 --- a/src/modules/actions/hardlink.py +++ b/src/modules/actions/hardlink.py @@ -37,108 +37,120 @@ from pkg import misc from pkg.client.api_errors import ActionExecutionError + class HardLinkAction(link.LinkAction): - """Class representing a hardlink-type packaging object.""" - - __slots__ = [] - - name = "hardlink" - ordinality = generic._orderdict[name] - - def compare(self, other): - return ((self.attrs["path"] > other.attrs["path"]) - - (self.attrs["path"] < other.attrs["path"])) - - def get_target_path(self): - """ return a path for target that is relative to image""" - - target = self.attrs["target"] - - # paths are either relative to path or absolute; - # both need to be passed through os.path.normpath to ensure - # that all ".." are removed to constrain target to image - - if target[0] != "/": - path = self.attrs["path"] - target = os.path.normpath( - os.path.join(os.path.split(path)[0], target)) - else: - target = os.path.normpath(target)[1:] - - return target - - def install(self, pkgplan, orig): - """Client-side method that installs a hard link.""" - - target = self.get_target_path() - path = self.get_installed_path(pkgplan.image.get_root()) - - # Don't allow installation through symlinks. - self.fsobj_checkpath(pkgplan, path) - - if not os.path.exists(os.path.dirname(path)): - self.makedirs(os.path.dirname(path), - mode=misc.PKG_DIR_MODE, - fmri=pkgplan.destination_fmri) - elif os.path.exists(path): - self.remove(pkgplan) - - fulltarget = os.path.normpath(os.path.sep.join( - (pkgplan.image.get_root(), target))) - - try: - os.link(fulltarget, path) - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise ActionExecutionError(self, error=e) - - # User or another process has removed target for - # hardlink, a package hasn't declared correct - # dependencies, or the target hasn't been installed - # yet. 
- err_txt = _("Unable to create hard link {path}; " - "target {target} is missing.").format( - path=path, target=fulltarget) - raise ActionExecutionError(self, details=err_txt, - error=e, fmri=pkgplan.destination_fmri) - - def verify(self, img, **args): - """Returns a tuple of lists of the form (errors, warnings, - info). The error list will be empty if the action has been - correctly installed in the given image.""" - - # - # We only allow hard links to regular files, so the hard - # link should lstat() as a regular file. - # - lstat, errors, warnings, info, abort = \ - self.verify_fsobj_common(img, stat.S_IFREG) - if abort: - assert errors - return errors, warnings, info - - target = self.get_target_path() - path = self.get_installed_path(img.get_root()) - target = os.path.normpath(os.path.sep.join( - (img.get_root(), target))) - - if not os.path.exists(target): - errors.append(_("Target '{0}' does not exist").format( - self.attrs["target"])) - - # No point in continuing if no target - if errors: - return errors, warnings, info - - try: - if os.stat(path).st_ino != os.stat(target).st_ino: - errors.append(_("Broken: Path and Target ({0}) " - "inodes not the same").format( - self.get_target_path())) - except OSError as e: - errors.append(_("Unexpected Error: {0}").format(e)) - - return errors, warnings, info + """Class representing a hardlink-type packaging object.""" + + __slots__ = [] + + name = "hardlink" + ordinality = generic._orderdict[name] + + def compare(self, other): + return (self.attrs["path"] > other.attrs["path"]) - ( + self.attrs["path"] < other.attrs["path"] + ) + + def get_target_path(self): + """return a path for target that is relative to image""" + + target = self.attrs["target"] + + # paths are either relative to path or absolute; + # both need to be passed through os.path.normpath to ensure + # that all ".." are removed to constrain target to image + + if target[0] != "/": + path = self.attrs["path"] + target = os.path.normpath( + os.path.join(os.path.split(path)[0], target) + ) + else: + target = os.path.normpath(target)[1:] + + return target + + def install(self, pkgplan, orig): + """Client-side method that installs a hard link.""" + + target = self.get_target_path() + path = self.get_installed_path(pkgplan.image.get_root()) + + # Don't allow installation through symlinks. + self.fsobj_checkpath(pkgplan, path) + + if not os.path.exists(os.path.dirname(path)): + self.makedirs( + os.path.dirname(path), + mode=misc.PKG_DIR_MODE, + fmri=pkgplan.destination_fmri, + ) + elif os.path.exists(path): + self.remove(pkgplan) + + fulltarget = os.path.normpath( + os.path.sep.join((pkgplan.image.get_root(), target)) + ) + + try: + os.link(fulltarget, path) + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise ActionExecutionError(self, error=e) + + # User or another process has removed target for + # hardlink, a package hasn't declared correct + # dependencies, or the target hasn't been installed + # yet. + err_txt = _( + "Unable to create hard link {path}; " + "target {target} is missing." + ).format(path=path, target=fulltarget) + raise ActionExecutionError( + self, details=err_txt, error=e, fmri=pkgplan.destination_fmri + ) + + def verify(self, img, **args): + """Returns a tuple of lists of the form (errors, warnings, + info). The error list will be empty if the action has been + correctly installed in the given image.""" + + # + # We only allow hard links to regular files, so the hard + # link should lstat() as a regular file. 
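+        # The link relationship itself is checked further below by
+        # comparing the inode numbers of the path and its target.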
+ # + lstat, errors, warnings, info, abort = self.verify_fsobj_common( + img, stat.S_IFREG + ) + if abort: + assert errors + return errors, warnings, info + + target = self.get_target_path() + path = self.get_installed_path(img.get_root()) + target = os.path.normpath(os.path.sep.join((img.get_root(), target))) + + if not os.path.exists(target): + errors.append( + _("Target '{0}' does not exist").format(self.attrs["target"]) + ) + + # No point in continuing if no target + if errors: + return errors, warnings, info + + try: + if os.stat(path).st_ino != os.stat(target).st_ino: + errors.append( + _( + "Broken: Path and Target ({0}) " "inodes not the same" + ).format(self.get_target_path()) + ) + except OSError as e: + errors.append(_("Unexpected Error: {0}").format(e)) + + return errors, warnings, info + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/legacy.py b/src/modules/actions/legacy.py index a5b1035ac..483f22cb1 100644 --- a/src/modules/actions/legacy.py +++ b/src/modules/actions/legacy.py @@ -39,167 +39,197 @@ from . import generic from pkg import misc + class LegacyAction(generic.Action): - """Class representing a legacy SVr4 packaging object.""" - - __slots__ = [] - - name = "legacy" - key_attr = "pkg" - unique_attrs = ("category", "desc", "hotline", "name", "pkg", "vendor", - "version", "basedir", "pkginst", "pstamp", "sunw_prodvers") - refcountable = True - globally_identical = True - ordinality = generic._orderdict[name] - - def directory_references(self): - return [os.path.normpath(os.path.join("var/sadm/pkg", - self.attrs["pkg"]))] - - def install(self, pkgplan, orig): - """Client-side method that installs the dummy package files. - Use per-pkg hardlinks to create reference count for pkginfo - file""" - - pkgdir = os.path.join(pkgplan.image.get_root(), "var/sadm/pkg", - self.attrs["pkg"]) - - if not os.path.isdir(pkgdir): - os.makedirs(pkgdir, misc.PKG_DIR_MODE) - - pkginfo = os.path.join(pkgdir, "pkginfo") - - self.__old_refcount_cleanup(pkginfo, pkgdir) - - pkg_summary = pkgplan.pkg_summary - if len(pkg_summary) > 256: - # The len check is done to avoid slice creation. - pkg_summary = pkg_summary[:256] - - svr4attrs = { - "arch": pkgplan.image.get_arch(), - "basedir": "/", - "category": "system", - "desc": None, - "hotline": None, - "name": pkg_summary, - "pkg": self.attrs["pkg"], - "pkginst": self.attrs["pkg"], - "pstamp": None, - "sunw_prodvers": None, - "vendor": None, - "version": str(pkgplan.destination_fmri.version), - } - - attrs = [ - (a.upper(), b) - for a in svr4attrs - for b in ( self.attrs.get(a, svr4attrs[a]), ) - if b - ] - # Always overwrite installation timestamp - attrs.append(("INSTDATE", - time.strftime("%b %d %Y %H:%M"))) - - with open(pkginfo, "w") as pfile: - for k, v in attrs: - pfile.write("{0}={1}\n".format(k, v)) - - # the svr4 pkg commands need contents file to work, but the - # needed directories are in the SUNWpkgcmds package.... - # Since this file is always of zero length, we can let this - # fail until those directories (and the commands that - # need them) appear. 
- - try: - open(os.path.join(pkgplan.image.get_root(), - "var/sadm/install/contents"), "a").close() - except IOError as e: - if e.errno != errno.ENOENT: - raise - - os.chmod(pkginfo, misc.PKG_FILE_MODE) - - def __old_refcount_cleanup(self, pkginfo, pkgdir): - """Clean up the turds of the old refcounting implementation.""" - - # Don't assume that the hardlinks are still in place; just - # remove all consecutively numbered files. - for i in itertools.count(2): - lfile = os.path.join(pkgdir, "pkginfo.{0:d}".format(i)) - try: - os.unlink(lfile) - except OSError as e: - if e.errno == errno.ENOENT: - break - raise - - def verify(self, img, **args): - """Returns a tuple of lists of the form (errors, warnings, - info). The error list will be empty if the action has been - correctly installed in the given image.""" - - errors = [] - warnings = [] - info = [] - - pkgdir = os.path.join(img.get_root(), "var/sadm/pkg", - self.attrs["pkg"]) - - # XXX this could be a better check & exactly validate pkginfo - # contents - if not os.path.isdir(pkgdir): - errors.append( - _("Missing directory var/sadm/pkg/{0}").format( - self.attrs["pkg"])) - return errors, warnings, info - - if not os.path.isfile(os.path.join(pkgdir, "pkginfo")): - errors.append(_("Missing file " - "var/sadm/pkg/{0}/pkginfo").format( - self.attrs["pkg"])) - return errors, warnings, info - - def remove(self, pkgplan): - - # pkg directory is removed via implicit directory removal - - pkgdir = os.path.join(pkgplan.image.get_root(), "var/sadm/pkg", - self.attrs["pkg"]) - - pkginfo = os.path.join(pkgdir, "pkginfo") - - self.__old_refcount_cleanup(pkginfo, pkgdir) - - try: - os.unlink(pkginfo) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - def generate_indices(self): - """Generates the indices needed by the search dictionary. See - generic.py for a more detailed explanation.""" - - return [ - ("legacy", "legacy_pkg", self.attrs["pkg"], None), - ("legacy", "pkg", self.attrs["pkg"], None) - ] - - def validate(self, fmri=None): - """Performs additional validation of action attributes that - for performance or other reasons cannot or should not be done - during Action object creation. An ActionError exception (or - subclass of) will be raised if any attributes are not valid. - This is primarily intended for use during publication or during - error handling to provide additional diagonostics. - - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action.""" - - generic.Action._validate(self, fmri=fmri, - single_attrs=("category", "desc", "hotline", "name", - "vendor", "version")) + """Class representing a legacy SVr4 packaging object.""" + + __slots__ = [] + + name = "legacy" + key_attr = "pkg" + unique_attrs = ( + "category", + "desc", + "hotline", + "name", + "pkg", + "vendor", + "version", + "basedir", + "pkginst", + "pstamp", + "sunw_prodvers", + ) + refcountable = True + globally_identical = True + ordinality = generic._orderdict[name] + + def directory_references(self): + return [ + os.path.normpath(os.path.join("var/sadm/pkg", self.attrs["pkg"])) + ] + + def install(self, pkgplan, orig): + """Client-side method that installs the dummy package files. 
+ Use per-pkg hardlinks to create reference count for pkginfo + file""" + + pkgdir = os.path.join( + pkgplan.image.get_root(), "var/sadm/pkg", self.attrs["pkg"] + ) + + if not os.path.isdir(pkgdir): + os.makedirs(pkgdir, misc.PKG_DIR_MODE) + + pkginfo = os.path.join(pkgdir, "pkginfo") + + self.__old_refcount_cleanup(pkginfo, pkgdir) + + pkg_summary = pkgplan.pkg_summary + if len(pkg_summary) > 256: + # The len check is done to avoid slice creation. + pkg_summary = pkg_summary[:256] + + svr4attrs = { + "arch": pkgplan.image.get_arch(), + "basedir": "/", + "category": "system", + "desc": None, + "hotline": None, + "name": pkg_summary, + "pkg": self.attrs["pkg"], + "pkginst": self.attrs["pkg"], + "pstamp": None, + "sunw_prodvers": None, + "vendor": None, + "version": str(pkgplan.destination_fmri.version), + } + + attrs = [ + (a.upper(), b) + for a in svr4attrs + for b in (self.attrs.get(a, svr4attrs[a]),) + if b + ] + # Always overwrite installation timestamp + attrs.append(("INSTDATE", time.strftime("%b %d %Y %H:%M"))) + + with open(pkginfo, "w") as pfile: + for k, v in attrs: + pfile.write("{0}={1}\n".format(k, v)) + + # the svr4 pkg commands need contents file to work, but the + # needed directories are in the SUNWpkgcmds package.... + # Since this file is always of zero length, we can let this + # fail until those directories (and the commands that + # need them) appear. + + try: + open( + os.path.join( + pkgplan.image.get_root(), "var/sadm/install/contents" + ), + "a", + ).close() + except IOError as e: + if e.errno != errno.ENOENT: + raise + + os.chmod(pkginfo, misc.PKG_FILE_MODE) + + def __old_refcount_cleanup(self, pkginfo, pkgdir): + """Clean up the turds of the old refcounting implementation.""" + + # Don't assume that the hardlinks are still in place; just + # remove all consecutively numbered files. + for i in itertools.count(2): + lfile = os.path.join(pkgdir, "pkginfo.{0:d}".format(i)) + try: + os.unlink(lfile) + except OSError as e: + if e.errno == errno.ENOENT: + break + raise + + def verify(self, img, **args): + """Returns a tuple of lists of the form (errors, warnings, + info). The error list will be empty if the action has been + correctly installed in the given image.""" + + errors = [] + warnings = [] + info = [] + + pkgdir = os.path.join(img.get_root(), "var/sadm/pkg", self.attrs["pkg"]) + + # XXX this could be a better check & exactly validate pkginfo + # contents + if not os.path.isdir(pkgdir): + errors.append( + _("Missing directory var/sadm/pkg/{0}").format( + self.attrs["pkg"] + ) + ) + return errors, warnings, info + + if not os.path.isfile(os.path.join(pkgdir, "pkginfo")): + errors.append( + _("Missing file " "var/sadm/pkg/{0}/pkginfo").format( + self.attrs["pkg"] + ) + ) + return errors, warnings, info + + def remove(self, pkgplan): + # pkg directory is removed via implicit directory removal + + pkgdir = os.path.join( + pkgplan.image.get_root(), "var/sadm/pkg", self.attrs["pkg"] + ) + + pkginfo = os.path.join(pkgdir, "pkginfo") + + self.__old_refcount_cleanup(pkginfo, pkgdir) + + try: + os.unlink(pkginfo) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + def generate_indices(self): + """Generates the indices needed by the search dictionary. 
See + generic.py for a more detailed explanation.""" + + return [ + ("legacy", "legacy_pkg", self.attrs["pkg"], None), + ("legacy", "pkg", self.attrs["pkg"], None), + ] + + def validate(self, fmri=None): + """Performs additional validation of action attributes that + for performance or other reasons cannot or should not be done + during Action object creation. An ActionError exception (or + subclass of) will be raised if any attributes are not valid. + This is primarily intended for use during publication or during + error handling to provide additional diagonostics. + + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action.""" + + generic.Action._validate( + self, + fmri=fmri, + single_attrs=( + "category", + "desc", + "hotline", + "name", + "vendor", + "version", + ), + ) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/license.py b/src/modules/actions/license.py index 85300c327..68e162865 100644 --- a/src/modules/actions/license.py +++ b/src/modules/actions/license.py @@ -44,233 +44,259 @@ from pkg.client.api_errors import ActionExecutionError from six.moves.urllib.parse import quote + class LicenseAction(generic.Action): - """Class representing a license packaging object.""" - - __slots__ = ["hash"] - - name = "license" - key_attr = "license" - unique_attrs = ("license", ) - reverse_indices = ("license", ) - refcountable = True - globally_identical = True - ordinality = generic._orderdict[name] - - has_payload = True - - def __init__(self, data=None, **attrs): - generic.Action.__init__(self, data, **attrs) - self.hash = "NOHASH" - - def preinstall(self, pkgplan, orig): - # Set attrs["path"] so filelist can handle this action; - # the path must be relative to the root of the image. - self.attrs["path"] = misc.relpath(os.path.join( - pkgplan.image.get_license_dir(pkgplan.destination_fmri), - "license." + quote(self.attrs["license"], "")), - pkgplan.image.get_root()) - - def install(self, pkgplan, orig): - """Client-side method that installs the license.""" - owner = 0 - group = 0 - - # ensure "path" is initialized. it may not be if we've loaded - # a plan that was previously prepared. 
- self.preinstall(pkgplan, orig) - - stream = self.data() - - path = self.get_installed_path(pkgplan.image.get_root()) - - # make sure the directory exists and the file is writable - if not os.path.exists(os.path.dirname(path)): - self.makedirs(os.path.dirname(path), - mode=misc.PKG_DIR_MODE, - fmri=pkgplan.destination_fmri) - elif os.path.exists(path): - os.chmod(path, misc.PKG_FILE_MODE) - - lfile = open(path, "wb") - try: - hash_attr, hash_val, hash_func = \ - digest.get_preferred_hash(self) - shasum = misc.gunzip_from_stream(stream, lfile, - hash_func=hash_func) - except zlib.error as e: - raise ActionExecutionError(self, details=_("Error " - "decompressing payload: {0}").format( - " ".join([str(a) for a in e.args])), error=e) - finally: - lfile.close() - stream.close() - - if shasum != hash_val: - raise ActionExecutionError(self, details=_("Action " - "data hash verification failure: expected: " - "{expected} computed: {actual} action: " - "{action}").format( - expected=hash_val, - actual=shasum, - action=self - )) - - os.chmod(path, misc.PKG_RO_FILE_MODE) - - try: - portable.chown(path, owner, group) - except OSError as e: - if e.errno != errno.EPERM: - raise - - def needsdata(self, orig, pkgplan): - # We always want to download the license - return True - - def verify(self, img, pfmri, **args): - """Returns a tuple of lists of the form (errors, warnings, - info). The error list will be empty if the action has been - correctly installed in the given image.""" - - errors = [] - warnings = [] - info = [] - - path = os.path.join(img.get_license_dir(pfmri), - "license." + quote(self.attrs["license"], "")) - - hash_attr, hash_val, hash_func = \ - digest.get_preferred_hash(self) - if args["forever"] == True: - try: - chash, cdata = misc.get_data_digest(path, - hash_func=hash_func) - except EnvironmentError as e: - if e.errno == errno.ENOENT: - errors.append(_("License file {0} does " - "not exist.").format(path)) - return errors, warnings, info - raise - - if chash != hash_val: - errors.append(_("Hash: '{found}' should be " - "'{expected}'").format(found=chash, - expected=hash_val)) - return errors, warnings, info - - def remove(self, pkgplan): - path = os.path.join( - pkgplan.image.get_license_dir(pkgplan.origin_fmri), - "license." + quote(self.attrs["license"], "")) - - try: - # Make file writable so it can be deleted - os.chmod(path, S_IWRITE|S_IREAD) - os.unlink(path) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - def generate_indices(self): - """Generates the indices needed by the search dictionary. See - generic.py for a more detailed explanation.""" - - indices = [("license", idx, self.attrs[idx], None) - for idx in self.reverse_indices] - if hasattr(self, "hash"): - indices.append(("license", "hash", self.hash, None)) - indices.append(("license", "content", self.hash, None)) - for attr in digest.DEFAULT_HASH_ATTRS: - # we already have an index entry for self.hash - if attr == "hash": - continue - hash = self.attrs[attr] - indices.append(("license", attr, hash, None)) - return indices - - def get_text(self, img, pfmri, alt_pub=None): - """Retrieves and returns the payload of the license (which - should be text). This may require remote retrieval of - resources and so this could raise a TransportError or other - ApiException. - If there are UTF-8 encoding errors in the text replace them - so that we still have a license to show rather than failing - the entire operation. The copy saved on disk is left as is. 
- - 'alt_pub' is an optional alternate Publisher to use for - any required transport operations. - """ - - path = self.get_local_path(img, pfmri) - hash_attr, hash_attr_val, hash_func = \ - digest.get_least_preferred_hash(self) - try: - with open(path, "rb") as fh: - length = os.stat(path).st_size - chash, txt = misc.get_data_digest(fh, - length=length, return_content=True, - hash_func=hash_func) - if chash == hash_attr_val: - return misc.force_str(txt, - errors='replace') - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - try: - if not alt_pub: - alt_pub = img.get_publisher(pfmri.publisher) - assert pfmri.publisher == alt_pub.prefix - return img.transport.get_content(alt_pub, hash_attr_val, - fmri=pfmri, hash_func=hash_func, errors="replace") - finally: - img.cleanup_downloads() - - def get_local_path(self, img, pfmri): - """Return an opener for the license text from the local disk or - None if the data for the text is not on-disk.""" - - if img.version <= 3: - # Older images stored licenses without accounting for - # '/', spaces, etc. properly. - path = os.path.join(img.get_license_dir(pfmri), - "license." + self.attrs["license"]) - else: - # Newer images ensure licenses are stored with encoded - # name so that '/', spaces, etc. are properly handled. - path = os.path.join(img.get_license_dir(pfmri), - "license." + quote(self.attrs["license"], - "")) - return path - - @property - def must_accept(self): - """Returns a boolean value indicating whether this license - action requires acceptance of its payload by clients.""" - - return self.attrs.get("must-accept", "").lower() == "true" - - @property - def must_display(self): - """Returns a boolean value indicating whether this license - action requires its payload to be displayed by clients.""" - - return self.attrs.get("must-display", "").lower() == "true" - - def validate(self, fmri=None): - """Performs additional validation of action attributes that - for performance or other reasons cannot or should not be done - during Action object creation. An ActionError exception (or - subclass of) will be raised if any attributes are not valid. - This is primarily intended for use during publication or during - error handling to provide additional diagonostics. - - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action.""" - - generic.Action._validate(self, fmri=fmri, - numeric_attrs=("pkg.csize", "pkg.size"), - single_attrs=("chash", "must-accept", "must-display")) + """Class representing a license packaging object.""" + + __slots__ = ["hash"] + + name = "license" + key_attr = "license" + unique_attrs = ("license",) + reverse_indices = ("license",) + refcountable = True + globally_identical = True + ordinality = generic._orderdict[name] + + has_payload = True + + def __init__(self, data=None, **attrs): + generic.Action.__init__(self, data, **attrs) + self.hash = "NOHASH" + + def preinstall(self, pkgplan, orig): + # Set attrs["path"] so filelist can handle this action; + # the path must be relative to the root of the image. + self.attrs["path"] = misc.relpath( + os.path.join( + pkgplan.image.get_license_dir(pkgplan.destination_fmri), + "license." + quote(self.attrs["license"], ""), + ), + pkgplan.image.get_root(), + ) + + def install(self, pkgplan, orig): + """Client-side method that installs the license.""" + owner = 0 + group = 0 + + # ensure "path" is initialized. it may not be if we've loaded + # a plan that was previously prepared. 
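+        # preinstall() simply recomputes attrs["path"], so calling it
+        # again here is harmless.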
+ self.preinstall(pkgplan, orig) + + stream = self.data() + + path = self.get_installed_path(pkgplan.image.get_root()) + + # make sure the directory exists and the file is writable + if not os.path.exists(os.path.dirname(path)): + self.makedirs( + os.path.dirname(path), + mode=misc.PKG_DIR_MODE, + fmri=pkgplan.destination_fmri, + ) + elif os.path.exists(path): + os.chmod(path, misc.PKG_FILE_MODE) + + lfile = open(path, "wb") + try: + hash_attr, hash_val, hash_func = digest.get_preferred_hash(self) + shasum = misc.gunzip_from_stream(stream, lfile, hash_func=hash_func) + except zlib.error as e: + raise ActionExecutionError( + self, + details=_("Error " "decompressing payload: {0}").format( + " ".join([str(a) for a in e.args]) + ), + error=e, + ) + finally: + lfile.close() + stream.close() + + if shasum != hash_val: + raise ActionExecutionError( + self, + details=_( + "Action " + "data hash verification failure: expected: " + "{expected} computed: {actual} action: " + "{action}" + ).format(expected=hash_val, actual=shasum, action=self), + ) + + os.chmod(path, misc.PKG_RO_FILE_MODE) + + try: + portable.chown(path, owner, group) + except OSError as e: + if e.errno != errno.EPERM: + raise + + def needsdata(self, orig, pkgplan): + # We always want to download the license + return True + + def verify(self, img, pfmri, **args): + """Returns a tuple of lists of the form (errors, warnings, + info). The error list will be empty if the action has been + correctly installed in the given image.""" + + errors = [] + warnings = [] + info = [] + + path = os.path.join( + img.get_license_dir(pfmri), + "license." + quote(self.attrs["license"], ""), + ) + + hash_attr, hash_val, hash_func = digest.get_preferred_hash(self) + if args["forever"] == True: + try: + chash, cdata = misc.get_data_digest(path, hash_func=hash_func) + except EnvironmentError as e: + if e.errno == errno.ENOENT: + errors.append( + _("License file {0} does " "not exist.").format(path) + ) + return errors, warnings, info + raise + + if chash != hash_val: + errors.append( + _("Hash: '{found}' should be " "'{expected}'").format( + found=chash, expected=hash_val + ) + ) + return errors, warnings, info + + def remove(self, pkgplan): + path = os.path.join( + pkgplan.image.get_license_dir(pkgplan.origin_fmri), + "license." + quote(self.attrs["license"], ""), + ) + + try: + # Make file writable so it can be deleted + os.chmod(path, S_IWRITE | S_IREAD) + os.unlink(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + def generate_indices(self): + """Generates the indices needed by the search dictionary. See + generic.py for a more detailed explanation.""" + + indices = [ + ("license", idx, self.attrs[idx], None) + for idx in self.reverse_indices + ] + if hasattr(self, "hash"): + indices.append(("license", "hash", self.hash, None)) + indices.append(("license", "content", self.hash, None)) + for attr in digest.DEFAULT_HASH_ATTRS: + # we already have an index entry for self.hash + if attr == "hash": + continue + hash = self.attrs[attr] + indices.append(("license", attr, hash, None)) + return indices + + def get_text(self, img, pfmri, alt_pub=None): + """Retrieves and returns the payload of the license (which + should be text). This may require remote retrieval of + resources and so this could raise a TransportError or other + ApiException. + If there are UTF-8 encoding errors in the text replace them + so that we still have a license to show rather than failing + the entire operation. The copy saved on disk is left as is. 
+ + 'alt_pub' is an optional alternate Publisher to use for + any required transport operations. + """ + + path = self.get_local_path(img, pfmri) + hash_attr, hash_attr_val, hash_func = digest.get_least_preferred_hash( + self + ) + try: + with open(path, "rb") as fh: + length = os.stat(path).st_size + chash, txt = misc.get_data_digest( + fh, length=length, return_content=True, hash_func=hash_func + ) + if chash == hash_attr_val: + return misc.force_str(txt, errors="replace") + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + try: + if not alt_pub: + alt_pub = img.get_publisher(pfmri.publisher) + assert pfmri.publisher == alt_pub.prefix + return img.transport.get_content( + alt_pub, + hash_attr_val, + fmri=pfmri, + hash_func=hash_func, + errors="replace", + ) + finally: + img.cleanup_downloads() + + def get_local_path(self, img, pfmri): + """Return an opener for the license text from the local disk or + None if the data for the text is not on-disk.""" + + if img.version <= 3: + # Older images stored licenses without accounting for + # '/', spaces, etc. properly. + path = os.path.join( + img.get_license_dir(pfmri), "license." + self.attrs["license"] + ) + else: + # Newer images ensure licenses are stored with encoded + # name so that '/', spaces, etc. are properly handled. + path = os.path.join( + img.get_license_dir(pfmri), + "license." + quote(self.attrs["license"], ""), + ) + return path + + @property + def must_accept(self): + """Returns a boolean value indicating whether this license + action requires acceptance of its payload by clients.""" + + return self.attrs.get("must-accept", "").lower() == "true" + + @property + def must_display(self): + """Returns a boolean value indicating whether this license + action requires its payload to be displayed by clients.""" + + return self.attrs.get("must-display", "").lower() == "true" + + def validate(self, fmri=None): + """Performs additional validation of action attributes that + for performance or other reasons cannot or should not be done + during Action object creation. An ActionError exception (or + subclass of) will be raised if any attributes are not valid. + This is primarily intended for use during publication or during + error handling to provide additional diagonostics. + + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action.""" + + generic.Action._validate( + self, + fmri=fmri, + numeric_attrs=("pkg.csize", "pkg.size"), + single_attrs=("chash", "must-accept", "must-display"), + ) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/link.py b/src/modules/actions/link.py index 94dd93192..bcb7760a5 100644 --- a/src/modules/actions/link.py +++ b/src/modules/actions/link.py @@ -41,181 +41,209 @@ from pkg import misc from pkg.client.api_errors import ActionExecutionError + class LinkAction(generic.Action): - """Class representing a link-type packaging object.""" - - __slots__ = [] - - name = "link" - key_attr = "path" - unique_attrs = "path", "target" - globally_identical = True - refcountable = True - namespace_group = "path" - ordinality = generic._orderdict[name] - - def install(self, pkgplan, orig): - """Client-side method that installs a link.""" - - target = self.attrs["target"] - path = self.get_installed_path(pkgplan.image.get_root()) - - # Don't allow installation through symlinks. 
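# A minimal illustrative sketch (hypothetical attribute values, not taken
# from the hunk above) of how the must_accept/must_display properties shown
# above behave: the manifest stores plain strings, and only a
# case-insensitive "true" enables the flag.
#
#     attrs = {"license": "EULA", "must-accept": "True"}
#     attrs.get("must-accept", "").lower() == "true"    # True  -> client must accept
#     attrs.get("must-display", "").lower() == "true"   # False -> no display required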
- self.fsobj_checkpath(pkgplan, path) - - if orig and self.attrs.get('preserve', None) == 'true': - # Preserved links are not installed if this is an - # update (orig set) and: - # - the link has been removed - if not os.path.lexists(path): - return - # - the link has been changed - atarget = os.readlink(path) - if atarget != orig.attrs.get('target', None): - return - - if not os.path.exists(os.path.dirname(path)): - self.makedirs(os.path.dirname(path), - mode=misc.PKG_DIR_MODE, - fmri=pkgplan.destination_fmri) - - # XXX The exists-unlink-symlink path appears to be as safe as it - # gets to modify a link with the current symlink(2) interface. - if os.path.lexists(path): - self.remove(pkgplan) - os.symlink(target, path) - - def verify(self, img, **args): - """Returns a tuple of lists of the form (errors, warnings, - info). The error list will be empty if the action has been - correctly installed in the given image.""" - - target = self.attrs["target"] - path = self.get_installed_path(img.get_root()) - preserve = self.attrs.get('preserve', None) - - # It's acceptable for links with preserve=true to be - # missing. - if preserve == 'true': - try: - os.lstat(path) - except OSError as e: - if e.errno == errno.ENOENT: - return [], [], [] - - lstat, errors, warnings, info, abort = \ - self.verify_fsobj_common(img, stat.S_IFLNK) - - if abort: - assert errors - return errors, warnings, info - - # It's acceptable for links with preserve=true to point - # elsewhere. - if preserve != 'true': - atarget = os.readlink(path) - - if target != atarget: - errors.append(_("Target: '{found}' should be " - "'{expected}'").format(found=atarget, - expected=target)) - return errors, warnings, info - - def remove(self, pkgplan): - """Removes the installed link from the system. If something - other than a link is found at the destination location, it - will be removed or salvaged.""" - - path = self.get_installed_path(pkgplan.image.get_root()) - return self.remove_fsobj(pkgplan, path) - - def generate_indices(self): - """Generates the indices needed by the search dictionary. See - generic.py for a more detailed explanation.""" - - rval = [ - (self.name, "basename", os.path.basename(self.attrs["path"]), - None), - (self.name, "path", os.path.sep + self.attrs["path"], None), - ] - if "mediator" in self.attrs: - rval.extend( - (self.name, k, v, None) - for k, v in six.iteritems(self.attrs) - if k.startswith("mediator") - ) - return rval - - def validate(self, fmri=None): - """Performs additional validation of action attributes that - for performance or other reasons cannot or should not be done - during Action object creation. An ActionError exception (or - subclass of) will be raised if any attributes are not valid. - This is primarily intended for use during publication or during - error handling to provide additional diagonostics. 
- - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action.""" - - errors = generic.Action._validate(self, fmri=fmri, - raise_errors=False, required_attrs=("target",), - single_attrs=("target", "mediator", "mediator-version", - "mediator-implementation", "mediator-priority")) - - if "mediator" not in self.attrs and \ - "mediator-version" not in self.attrs and \ - "mediator-implementation" not in self.attrs and \ - "mediator-priority" not in self.attrs: - if errors: - raise pkg.actions.InvalidActionAttributesError( - self, errors, fmri=fmri) - return - - mediator = self.attrs.get("mediator") - med_version = self.attrs.get("mediator-version") - med_implementation = self.attrs.get("mediator-implementation") - med_priority = self.attrs.get("mediator-priority") - - if not mediator and (med_version or med_implementation or - med_priority): - errors.append(("mediator", _("a mediator must be " - "provided when mediator-version, " - "mediator-implementation, or mediator-priority " - "is specified"))) - elif mediator is not None and \ - not isinstance(mediator, list): - valid, error = med.valid_mediator(mediator) - if not valid: - errors.append(("mediator", error)) - - if not (med_version or med_implementation): - errors.append(("mediator", _("a mediator-version or " - "mediator-implementation must be provided if a " - "mediator is specified"))) - - if med_version is not None and \ - not isinstance(med_version, list): - valid, error = med.valid_mediator_version(med_version) - if not valid: - errors.append(("mediator-version", error)) - - if med_implementation is not None and \ - not isinstance(med_implementation, list): - valid, error = med.valid_mediator_implementation( - med_implementation) - if not valid: - errors.append(("mediator-implementation", - error)) - - if med_priority is not None and \ - not isinstance(med_priority, list): - valid, error = med.valid_mediator_priority(med_priority) - if not valid: - errors.append(("mediator-priority", error)) - - if errors: - raise pkg.actions.InvalidActionAttributesError(self, - errors, fmri=fmri) + """Class representing a link-type packaging object.""" + + __slots__ = [] + + name = "link" + key_attr = "path" + unique_attrs = "path", "target" + globally_identical = True + refcountable = True + namespace_group = "path" + ordinality = generic._orderdict[name] + + def install(self, pkgplan, orig): + """Client-side method that installs a link.""" + + target = self.attrs["target"] + path = self.get_installed_path(pkgplan.image.get_root()) + + # Don't allow installation through symlinks. + self.fsobj_checkpath(pkgplan, path) + + if orig and self.attrs.get("preserve", None) == "true": + # Preserved links are not installed if this is an + # update (orig set) and: + # - the link has been removed + if not os.path.lexists(path): + return + # - the link has been changed + atarget = os.readlink(path) + if atarget != orig.attrs.get("target", None): + return + + if not os.path.exists(os.path.dirname(path)): + self.makedirs( + os.path.dirname(path), + mode=misc.PKG_DIR_MODE, + fmri=pkgplan.destination_fmri, + ) + + # XXX The exists-unlink-symlink path appears to be as safe as it + # gets to modify a link with the current symlink(2) interface. + if os.path.lexists(path): + self.remove(pkgplan) + os.symlink(target, path) + + def verify(self, img, **args): + """Returns a tuple of lists of the form (errors, warnings, + info). 
The error list will be empty if the action has been + correctly installed in the given image.""" + + target = self.attrs["target"] + path = self.get_installed_path(img.get_root()) + preserve = self.attrs.get("preserve", None) + + # It's acceptable for links with preserve=true to be + # missing. + if preserve == "true": + try: + os.lstat(path) + except OSError as e: + if e.errno == errno.ENOENT: + return [], [], [] + + lstat, errors, warnings, info, abort = self.verify_fsobj_common( + img, stat.S_IFLNK + ) + + if abort: + assert errors + return errors, warnings, info + + # It's acceptable for links with preserve=true to point + # elsewhere. + if preserve != "true": + atarget = os.readlink(path) + + if target != atarget: + errors.append( + _("Target: '{found}' should be " "'{expected}'").format( + found=atarget, expected=target + ) + ) + return errors, warnings, info + + def remove(self, pkgplan): + """Removes the installed link from the system. If something + other than a link is found at the destination location, it + will be removed or salvaged.""" + + path = self.get_installed_path(pkgplan.image.get_root()) + return self.remove_fsobj(pkgplan, path) + + def generate_indices(self): + """Generates the indices needed by the search dictionary. See + generic.py for a more detailed explanation.""" + + rval = [ + (self.name, "basename", os.path.basename(self.attrs["path"]), None), + (self.name, "path", os.path.sep + self.attrs["path"], None), + ] + if "mediator" in self.attrs: + rval.extend( + (self.name, k, v, None) + for k, v in six.iteritems(self.attrs) + if k.startswith("mediator") + ) + return rval + + def validate(self, fmri=None): + """Performs additional validation of action attributes that + for performance or other reasons cannot or should not be done + during Action object creation. An ActionError exception (or + subclass of) will be raised if any attributes are not valid. + This is primarily intended for use during publication or during + error handling to provide additional diagonostics. 
+ + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action.""" + + errors = generic.Action._validate( + self, + fmri=fmri, + raise_errors=False, + required_attrs=("target",), + single_attrs=( + "target", + "mediator", + "mediator-version", + "mediator-implementation", + "mediator-priority", + ), + ) + + if ( + "mediator" not in self.attrs + and "mediator-version" not in self.attrs + and "mediator-implementation" not in self.attrs + and "mediator-priority" not in self.attrs + ): + if errors: + raise pkg.actions.InvalidActionAttributesError( + self, errors, fmri=fmri + ) + return + + mediator = self.attrs.get("mediator") + med_version = self.attrs.get("mediator-version") + med_implementation = self.attrs.get("mediator-implementation") + med_priority = self.attrs.get("mediator-priority") + + if not mediator and (med_version or med_implementation or med_priority): + errors.append( + ( + "mediator", + _( + "a mediator must be " + "provided when mediator-version, " + "mediator-implementation, or mediator-priority " + "is specified" + ), + ) + ) + elif mediator is not None and not isinstance(mediator, list): + valid, error = med.valid_mediator(mediator) + if not valid: + errors.append(("mediator", error)) + + if not (med_version or med_implementation): + errors.append( + ( + "mediator", + _( + "a mediator-version or " + "mediator-implementation must be provided if a " + "mediator is specified" + ), + ) + ) + + if med_version is not None and not isinstance(med_version, list): + valid, error = med.valid_mediator_version(med_version) + if not valid: + errors.append(("mediator-version", error)) + + if med_implementation is not None and not isinstance( + med_implementation, list + ): + valid, error = med.valid_mediator_implementation(med_implementation) + if not valid: + errors.append(("mediator-implementation", error)) + + if med_priority is not None and not isinstance(med_priority, list): + valid, error = med.valid_mediator_priority(med_priority) + if not valid: + errors.append(("mediator-priority", error)) + + if errors: + raise pkg.actions.InvalidActionAttributesError( + self, errors, fmri=fmri + ) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/signature.py b/src/modules/actions/signature.py index bd3ed6031..6548342f4 100644 --- a/src/modules/actions/signature.py +++ b/src/modules/actions/signature.py @@ -46,597 +46,674 @@ valid_hash_algs = ("sha256", "sha384", "sha512") valid_sig_algs = ("rsa",) -if list(map(int, _cver.split('.'))) >= [3, 4, 0]: - # In cryptography 3.4, the hash classes moved to subclasses of - # hashes.hashAlgorithm - hash_registry = hashes.HashAlgorithm.__subclasses__() +if list(map(int, _cver.split("."))) >= [3, 4, 0]: + # In cryptography 3.4, the hash classes moved to subclasses of + # hashes.hashAlgorithm + hash_registry = hashes.HashAlgorithm.__subclasses__() else: - # For cryptography < 3.4.0 - import abc + # For cryptography < 3.4.0 + import abc - hash_registry = [ - ref() - for ref in abc._get_dump(hashes.HashAlgorithm)[0] - if ref() - ] + hash_registry = [ + ref() for ref in abc._get_dump(hashes.HashAlgorithm)[0] if ref() + ] -class SignatureAction(generic.Action): - """Class representing the signature-type packaging object.""" - - __slots__ = ["hash", "hash_alg", "sig_alg", "cert_ident", - "chain_cert_openers"] - - name = "signature" - key_attr = "value" - ordinality = generic._orderdict[name] - - def __init__(self, data, **attrs): - 
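# A minimal sketch of the mediator rules enforced by LinkAction.validate()
# above (hypothetical link attributes; the real checks also validate each
# value's syntax via the pkg mediator helpers):
#
#     link path=usr/bin/vi target=vim-8                              # ok, unmediated
#     link path=usr/bin/vi target=vim-8 mediator=vi \
#          mediator-implementation=vim                               # ok
#     link path=usr/bin/vi target=vim-8 mediator=vi                  # error: needs a
#                                                                    #  mediator-version or
#                                                                    #  mediator-implementation
#     link path=usr/bin/vi target=vim-8 mediator-version=8.2         # error: mediator missing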
generic.Action.__init__(self, data, **attrs) - - self.hash = None - self.chain_cert_openers = [] - - try: - self.sig_alg, self.hash_alg = self.decompose_sig_alg( - self.attrs["algorithm"]) - except KeyError: - raise pkg.actions.InvalidActionError(str(self), - _("Missing algorithm attribute")) - if "value" not in self.attrs: - self.attrs["value"] = "" - if "version" not in self.attrs: - self.attrs["version"] = \ - str(generic.Action.sig_version) - - @property - def has_payload(self): - # If there's a hash, then there's a certificate to deliver - # with this action. - if not self.hash: - return False - return True - def needsdata(self, orig, pkgplan): - return self.has_payload - - @staticmethod - def make_opener(pth): - def file_opener(): - return open(pth, "rb") - return file_opener - - def __set_chain_certs_data(self, chain_certs, chash_dir): - """Store the information about the certs needed to validate - this signature in the signature. - - The 'chain_certs' parameter is a list of paths to certificates. - """ - - self.chain_cert_openers = [] - - # chain_hshes and chain_chshes are dictionaries which map a - # given hash or compressed hash attribute to a list of the hash - # values for each path in chain_certs. - chain_hshes = {} - chain_chshes = {} - chain_csizes = [] - chain_sizes = [] - - for attr in digest.DEFAULT_CHAIN_ATTRS: - chain_hshes[attr] = [] - for attr in digest.DEFAULT_CHAIN_CHASH_ATTRS: - chain_chshes[attr] = [] - - for pth in chain_certs: - if not os.path.exists(pth): - raise pkg.actions.ActionDataError( - _("No such file: '{0}'.").format(pth), - path=pth) - elif os.path.isdir(pth): - raise pkg.actions.ActionDataError( - _("'{0}' is not a file.").format(pth), - path=pth) - file_opener = self.make_opener(pth) - self.chain_cert_openers.append(file_opener) - self.attrs.setdefault("chain.sizes", []) - self.attrs.setdefault("chain.csizes", []) - - try: - fs = os.stat(pth) - chain_sizes.append(str(fs.st_size)) - except EnvironmentError as e: - raise pkg.actions.ActionDataError(e, path=pth) - # misc.get_data_digest takes care of closing the file - # that's opened below. - with file_opener() as fh: - hshes, data = misc.get_data_digest(fh, - length=fs.st_size, return_content=True, - hash_attrs=digest.DEFAULT_CHAIN_ATTRS, - hash_algs=digest.CHAIN_ALGS) - - for attr in hshes: - chain_hshes[attr].append(hshes[attr]) - - # We need a filename to use for the uncompressed chain - # cert, so get the preferred chain hash value from the - # chain_hshes - alg = digest.PREFERRED_HASH - if alg == "sha1": - attr = "chain" - else: - attr = "pkg.chain.{0}".format(alg) - chain_val = hshes.get(attr) - - csize, chashes = misc.compute_compressed_attrs( - chain_val, None, data, fs.st_size, chash_dir, - chash_attrs=digest.DEFAULT_CHAIN_CHASH_ATTRS, - chash_algs=digest.CHAIN_CHASH_ALGS) - - chain_csizes.append(csize) - for attr in chashes: - chain_chshes[attr].append(chashes[attr]) - - # Remove any unused hash attributes. - for cattrs in (chain_hshes, chain_chshes): - for attr in list(cattrs.keys()): - if not cattrs[attr]: - cattrs.pop(attr, None) - - if chain_hshes: - # These attributes are stored as a single value with - # spaces in it rather than multiple values to ensure - # the ordering remains consistent. 
- self.attrs["chain.sizes"] = " ".join(chain_sizes) - self.attrs["chain.csizes"] = " ".join(chain_csizes) - - for attr in digest.DEFAULT_CHAIN_ATTRS: - self.attrs[attr] = " ".join(chain_hshes[attr]) - for attr in digest.DEFAULT_CHAIN_CHASH_ATTRS: - self.attrs[attr] = " ".join(chain_chshes[attr]) - - def __get_hash_by_name(self, name): - """Get the cryptopgraphy Hash() class based on the OpenSSL - algorithm name.""" - global hash_registry - - for h in hash_registry: - if h.name == name: - return h - - def get_size(self): - res = generic.Action.get_size(self) - for s in self.attrs.get("chain.sizes", "").split(): - res += int(s) - return res - - def get_action_chain_csize(self): - res = 0 - for s in self.attrs.get("chain.csizes", "").split(): - res += int(s) - return res - - def get_chain_csize(self, chain): - # The length of 'chain' is also going to be the length - # of pkg.chain., so there's no need to look for - # other hash attributes here. - for c, s in zip(self.attrs.get("chain", "").split(), - self.attrs.get("chain.csizes", "").split()): - if c == chain: - return int(s) - return None - - def get_chain_size(self, chain): - for c, s in zip(self.attrs.get("chain", "").split(), - self.attrs.get("chain.sizes", "").split()): - if c == chain: - return int(s) - return None - - def sig_str(self, a, version): - """Create a stable string representation of an action that - is deterministic in its creation. If creating a string from an - action is non-deterministic, then manifest signing cannot work. - - The parameter 'a' is the signature action that's going to use - the string produced. It's needed for the signature string - action, and is here to keep the method signature the same. - """ - - # Any changes to this function mean Action.sig_version must be - # incremented. - - if version != generic.Action.sig_version: - raise apx.UnsupportedSignatureVersion(version, sig=self) - # Signature actions don't sign other signature actions. So if - # the action that's doing the signing isn't ourself, return - # nothing. - if str(a) != str(self): - return None - - # It's necessary to sign the action as the client will see it, - # post publication. To do that, it's necessary to simulate the - # publication process on a copy of the action, converting - # paths to hashes and adding size information. - tmp_a = SignatureAction(None, **self.attrs) - # The signature action can't sign the value of the value - # attribute, but it can sign that attribute's name. - tmp_a.attrs["value"] = "" - if hasattr(self.data, "__call__"): - size = int(self.attrs.get("pkg.size", 0)) - tmp_dir = tempfile.mkdtemp() - with self.data() as fh: - hashes, data = misc.get_data_digest(fh, - size, return_content=True, - hash_attrs=digest.DEFAULT_HASH_ATTRS, - hash_algs=digest.HASH_ALGS) - tmp_a.attrs.update(hashes) - # "hash" is special since it shouldn't appear in - # the action attributes, it gets set as a member - # instead. - if "hash" in tmp_a.attrs: - tmp_a.hash = tmp_a.attrs["hash"] - del tmp_a.attrs["hash"] - - # The use of self.hash here is just to point to a - # filename, the type of hash used for self.hash is - # irrelevant. Note that our use of self.hash for the - # basename will need to be modified when we finally move - # off SHA-1 hashes. 
- csize, chashes = misc.compute_compressed_attrs( - os.path.basename(self.hash), self.hash, data, size, - tmp_dir) - shutil.rmtree(tmp_dir) - tmp_a.attrs["pkg.csize"] = csize - for attr in chashes: - tmp_a.attrs[attr] = chashes[attr] - elif self.hash: - tmp_a.hash = self.hash - for attr in digest.DEFAULT_HASH_ATTRS: - if attr in self.attrs: - tmp_a.attrs[attr] = self.attrs[attr] - - csizes = [] - chain_hashes = {} - chain_chashes = {} - for attr in digest.DEFAULT_CHAIN_ATTRS: - chain_hashes[attr] = [] - for attr in digest.DEFAULT_CHAIN_CHASH_ATTRS: - chain_chashes[attr] = [] - - sizes = self.attrs.get("chain.sizes", "").split() - for i, c in enumerate(self.chain_cert_openers): - size = int(sizes[i]) - tmp_dir = tempfile.mkdtemp() - hshes, data = misc.get_data_digest(c(), size, - return_content=True, - hash_attrs=digest.DEFAULT_CHAIN_ATTRS, - hash_algs=digest.CHAIN_ALGS) - - for attr in hshes: - chain_hashes[attr].append(hshes[attr]) - - csize, chashes = misc.compute_compressed_attrs("tmp", - None, data, size, tmp_dir, - chash_attrs=digest.DEFAULT_CHAIN_CHASH_ATTRS, - chash_algs=digest.CHAIN_CHASH_ALGS) - shutil.rmtree(tmp_dir) - csizes.append(csize) - for attr in chashes: - chain_chashes[attr].append(chashes[attr]) - - if chain_hashes: - for attr in digest.DEFAULT_CHAIN_ATTRS: - if chain_hashes[attr]: - tmp_a.attrs[attr] = " ".join( - chain_hashes[attr]) - - # Now that tmp_a looks like the post-published action, transform - # it into a string using the generic sig_str method. - return generic.Action.sig_str(tmp_a, tmp_a, version) - - def actions_to_str(self, acts, version): - """Transforms a collection of actions into a string that is - used to sign those actions.""" - - # If a is None, then the action was another signature action so - # discard it from the information to be signed. - return "\n".join(sorted( - (a for a in - (b.sig_str(self, version) for b in acts) - if a is not None))) - - def retrieve_chain_certs(self, pub): - """Retrieve the chain certificates needed to validate this - signature.""" - - chain_attr, chain_val, hash_func = \ - digest.get_least_preferred_hash(self, - hash_type=digest.CHAIN) - # We may not have any chain certs for this signature - if not chain_val: - return - for c in chain_val.split(): - pub.get_cert_by_hash(c, only_retrieve=True, - hash_func=hash_func) - - def get_chain_certs(self, least_preferred=False): - """Return a list of the chain certificates needed to validate - this signature. When retrieving the content from the - repository, we use the "least preferred" hash for backwards - compatibility, but when verifying the content, we use the - "most preferred" hash.""" - - if least_preferred: - chain_attr, chain_val, hash_func = \ - digest.get_least_preferred_hash(self, - hash_type=digest.CHAIN) - else: - chain_attr, chain_val, hash_func = \ - digest.get_preferred_hash(self, - hash_type=digest.CHAIN) - if not chain_val: - return [] - return chain_val.split() - - def get_chain_certs_chashes(self, least_preferred=False): - """Return a list of the chain certificates needed to validate - this signature.""" - - if least_preferred: - chain_chash_attr, chain_chash_val, hash_func = \ - digest.get_least_preferred_hash(self, - hash_type=digest.CHAIN_CHASH) - else: - chain_chash_attr, chain_chash_val, hash_func = \ - digest.get_preferred_hash(self, - hash_type=digest.CHAIN_CHASH) - if not chain_chash_val: - return [] - return chain_chash_val.split() - - def is_signed(self): - """Returns True if this action is signed using a key, instead - of simply being a hash. 
Since variant tagged signature - actions are not handled yet, it also returns False in that - case.""" - - return self.hash is not None and not self.get_variant_template() - - @staticmethod - def decompose_sig_alg(val): - """Split the sig_alg attribute up in to something useful.""" - - for s in valid_sig_algs: - for h in valid_hash_algs: - t = "{0}-{1}".format(s, h) - if val == t: - return s, h - for h in valid_hash_algs: - if h == val: - return None, h - return None, None - - def verify_sig(self, acts, pub, trust_anchors, use_crls, - required_names=None): - """Try to verify this signature. It can return True or - None. None means we didn't know how to verify this signature. - If we do know how to verify the signature but it doesn't verify, - then an exception is raised. - - The 'acts' parameter is the iterable of actions against which - to verify the signature. - - The 'pub' parameter is the publisher that published the - package this action signed. - - The 'trust_anchors' parameter contains the trust anchors to use - when verifying the signature. - - The 'required_names' parameter is a set of strings that must - be seen as a CN in the chain of trust for the certificate.""" - - ver = int(self.attrs["version"]) - # If this signature is tagged with variants, if the version is - # higher than one we know about, or it uses an unrecognized - # hash algorithm, we can't handle it yet. - if self.get_variant_template() or \ - ver > generic.Action.sig_version or not self.hash_alg: - return None - # Turning this into a list makes debugging vastly more - # tractable. - acts = list(acts) - # If self.hash is None, then the signature is storing a hash - # of the actions, not a signed value. - if self.hash is None: - assert self.sig_alg is None - h = hashlib.new(self.hash_alg) - h.update(misc.force_bytes(self.actions_to_str( - acts, ver))) - computed_hash = h.digest() - # The attrs value is stored in hex so that it's easy - # to read. - if misc.hex_to_binary(self.attrs["value"]) != \ - computed_hash: - raise apx.UnverifiedSignature(self, - _("The signature value did not match the " - "expected value. action: {0}").format(self)) - return True - # Verify a signature that's not just a hash. - if self.sig_alg is None: - return None - # Get the certificate paired with the key which signed this - # action. - attr, hash_val, hash_func = \ - digest.get_least_preferred_hash(self) - cert = pub.get_cert_by_hash(hash_val, verify_hash=True, - hash_func=hash_func) - # Make sure that the intermediate certificates that are needed - # to validate this signature are present. - self.retrieve_chain_certs(pub) - try: - # This import is placed here to break a circular - # import seen when merge.py is used. - from pkg.client.publisher import CODE_SIGNING_USE - # Verify the certificate whose key created this - # signature action. - pub.verify_chain(cert, trust_anchors, 0, use_crls, - required_names=required_names, - usages=CODE_SIGNING_USE) - except apx.SigningException as e: - e.act = self - raise - # Check that the certificate verifies against this signature. 
- pub_key = cert.public_key() - hhash = self.__get_hash_by_name(self.hash_alg) - try: - pub_key.verify( - misc.hex_to_binary(self.attrs["value"]), - misc.force_bytes(self.actions_to_str(acts, ver)), - padding.PKCS1v15(), - hhash()) - except InvalidSignature: - raise apx.UnverifiedSignature(self, - _("The signature value did not match the expected " - "value.")) - - return True - - def set_signature(self, acts, key_path=None, chain_paths=misc.EmptyI, - chash_dir=None): - """Sets the signature value for this action. - - The 'acts' parameter is the iterable of actions this action - should sign. - - The 'key_path' parameter is the path to the file containing the - private key which is used to sign the actions. - - The 'chain_paths' parameter is an iterable of paths to - certificates which are needed to form the chain of trust from - the certificate associated with the key in 'key_path' to one of - the CAs for the publisher of the actions. - - The 'chash_dir' parameter is the temporary directory to use - while calculating the compressed hashes for chain certs.""" - - # Turning this into a list makes debugging vastly more - # tractable. - acts = list(acts) - - # If key_path is None, then set value to be the hash - # of the actions. - if key_path is None: - # If no private key is set, then no certificate should - # have been given. - assert self.data is None - h = hashlib.new(self.hash_alg) - h.update(misc.force_bytes(self.actions_to_str(acts, - generic.Action.sig_version))) - self.attrs["value"] = h.hexdigest() - else: - # If a private key is used, then the certificate it's - # paired with must be provided. - assert self.data is not None - self.__set_chain_certs_data(chain_paths, chash_dir) - - try: - with open(key_path, "rb") as f: - priv_key = serialization.load_pem_private_key( - f.read(), password=None, - backend=default_backend()) - except ValueError: - raise apx.BadFileFormat(_("{0} was expected to " - "be a RSA key but could not be read " - "correctly.").format(key_path)) - - hhash = self.__get_hash_by_name(self.hash_alg) - self.attrs["value"] = misc.binary_to_hex(priv_key.sign( - misc.force_bytes(self.actions_to_str(acts, - generic.Action.sig_version)), - padding.PKCS1v15(), - hhash() - )) - - def generate_indices(self): - """Generates the indices needed by the search dictionary. See - generic.py for a more detailed explanation.""" - - res = [] - if self.hash is not None: - res.append((self.name, "certificate", self.hash, - self.hash)) - res.append((self.name, "algorithm", - self.attrs["algorithm"], self.attrs["algorithm"])) - res.append((self.name, "signature", self.attrs["value"], - self.attrs["value"])) - for attr in digest.DEFAULT_HASH_ATTRS: - # We already have an index entry for self.hash; - # we only want hash attributes other than "hash". - hash = self.attrs.get(attr) - if attr != "hash" and hash is not None: - res.append((self.name, attr, hash, None)) - return res - - def identical(self, other, hsh): - """Check whether another action is identical to this - signature.""" - # Only signature actions can be identical to other signature - # actions. - if self.name != other.name: - return False - # If the code signing certs are identical, the more checking is - # needed. - # Determine if we share any hash attribute values with the other - # action. 
- matching_hash_attrs = set() - for attr in digest.DEFAULT_HASH_ATTRS: - if attr == "hash": - # we deal with the 'hash' member later - continue - if attr in self.attrs and attr in other.attrs and \ - self.attrs[attr] == other.attrs[attr] and \ - self.assrs[attr]: - matching_hash_attrs.add(attr) - if hsh and hsh == other.attrs.get(attr): - # Technically 'hsh' isn't a hash attr, it's - # a hash attr value, but that's enough for us - # to consider it as potentially identical. - matching_hash_attrs.add(hsh) - - if hsh == other.hash or self.hash == other.hash or \ - matching_hash_attrs: - # If the algorithms are using different algorithms or - # have different versions, then they're not identical. - if self.attrs["algorithm"] != \ - other.attrs["algorithm"] or \ - self.attrs["version"] != other.attrs["version"]: - return False - # If the values are the same, then they're identical. - if self.attrs["value"] == other.attrs["value"]: - return True - raise apx.AlmostIdentical(hsh, - self.attrs["algorithm"], self.attrs["version"]) +class SignatureAction(generic.Action): + """Class representing the signature-type packaging object.""" + + __slots__ = [ + "hash", + "hash_alg", + "sig_alg", + "cert_ident", + "chain_cert_openers", + ] + + name = "signature" + key_attr = "value" + ordinality = generic._orderdict[name] + + def __init__(self, data, **attrs): + generic.Action.__init__(self, data, **attrs) + + self.hash = None + self.chain_cert_openers = [] + + try: + self.sig_alg, self.hash_alg = self.decompose_sig_alg( + self.attrs["algorithm"] + ) + except KeyError: + raise pkg.actions.InvalidActionError( + str(self), _("Missing algorithm attribute") + ) + if "value" not in self.attrs: + self.attrs["value"] = "" + if "version" not in self.attrs: + self.attrs["version"] = str(generic.Action.sig_version) + + @property + def has_payload(self): + # If there's a hash, then there's a certificate to deliver + # with this action. + if not self.hash: + return False + return True + + def needsdata(self, orig, pkgplan): + return self.has_payload + + @staticmethod + def make_opener(pth): + def file_opener(): + return open(pth, "rb") + + return file_opener + + def __set_chain_certs_data(self, chain_certs, chash_dir): + """Store the information about the certs needed to validate + this signature in the signature. + + The 'chain_certs' parameter is a list of paths to certificates. + """ + + self.chain_cert_openers = [] + + # chain_hshes and chain_chshes are dictionaries which map a + # given hash or compressed hash attribute to a list of the hash + # values for each path in chain_certs. + chain_hshes = {} + chain_chshes = {} + chain_csizes = [] + chain_sizes = [] + + for attr in digest.DEFAULT_CHAIN_ATTRS: + chain_hshes[attr] = [] + for attr in digest.DEFAULT_CHAIN_CHASH_ATTRS: + chain_chshes[attr] = [] + + for pth in chain_certs: + if not os.path.exists(pth): + raise pkg.actions.ActionDataError( + _("No such file: '{0}'.").format(pth), path=pth + ) + elif os.path.isdir(pth): + raise pkg.actions.ActionDataError( + _("'{0}' is not a file.").format(pth), path=pth + ) + file_opener = self.make_opener(pth) + self.chain_cert_openers.append(file_opener) + self.attrs.setdefault("chain.sizes", []) + self.attrs.setdefault("chain.csizes", []) + + try: + fs = os.stat(pth) + chain_sizes.append(str(fs.st_size)) + except EnvironmentError as e: + raise pkg.actions.ActionDataError(e, path=pth) + # misc.get_data_digest takes care of closing the file + # that's opened below. 
+ with file_opener() as fh: + hshes, data = misc.get_data_digest( + fh, + length=fs.st_size, + return_content=True, + hash_attrs=digest.DEFAULT_CHAIN_ATTRS, + hash_algs=digest.CHAIN_ALGS, + ) + + for attr in hshes: + chain_hshes[attr].append(hshes[attr]) + + # We need a filename to use for the uncompressed chain + # cert, so get the preferred chain hash value from the + # chain_hshes + alg = digest.PREFERRED_HASH + if alg == "sha1": + attr = "chain" + else: + attr = "pkg.chain.{0}".format(alg) + chain_val = hshes.get(attr) + + csize, chashes = misc.compute_compressed_attrs( + chain_val, + None, + data, + fs.st_size, + chash_dir, + chash_attrs=digest.DEFAULT_CHAIN_CHASH_ATTRS, + chash_algs=digest.CHAIN_CHASH_ALGS, + ) + + chain_csizes.append(csize) + for attr in chashes: + chain_chshes[attr].append(chashes[attr]) + + # Remove any unused hash attributes. + for cattrs in (chain_hshes, chain_chshes): + for attr in list(cattrs.keys()): + if not cattrs[attr]: + cattrs.pop(attr, None) + + if chain_hshes: + # These attributes are stored as a single value with + # spaces in it rather than multiple values to ensure + # the ordering remains consistent. + self.attrs["chain.sizes"] = " ".join(chain_sizes) + self.attrs["chain.csizes"] = " ".join(chain_csizes) + + for attr in digest.DEFAULT_CHAIN_ATTRS: + self.attrs[attr] = " ".join(chain_hshes[attr]) + for attr in digest.DEFAULT_CHAIN_CHASH_ATTRS: + self.attrs[attr] = " ".join(chain_chshes[attr]) + + def __get_hash_by_name(self, name): + """Get the cryptopgraphy Hash() class based on the OpenSSL + algorithm name.""" + global hash_registry + + for h in hash_registry: + if h.name == name: + return h + + def get_size(self): + res = generic.Action.get_size(self) + for s in self.attrs.get("chain.sizes", "").split(): + res += int(s) + return res + + def get_action_chain_csize(self): + res = 0 + for s in self.attrs.get("chain.csizes", "").split(): + res += int(s) + return res + + def get_chain_csize(self, chain): + # The length of 'chain' is also going to be the length + # of pkg.chain., so there's no need to look for + # other hash attributes here. + for c, s in zip( + self.attrs.get("chain", "").split(), + self.attrs.get("chain.csizes", "").split(), + ): + if c == chain: + return int(s) + return None + + def get_chain_size(self, chain): + for c, s in zip( + self.attrs.get("chain", "").split(), + self.attrs.get("chain.sizes", "").split(), + ): + if c == chain: + return int(s) + return None + + def sig_str(self, a, version): + """Create a stable string representation of an action that + is deterministic in its creation. If creating a string from an + action is non-deterministic, then manifest signing cannot work. + + The parameter 'a' is the signature action that's going to use + the string produced. It's needed for the signature string + action, and is here to keep the method signature the same. + """ + + # Any changes to this function mean Action.sig_version must be + # incremented. + + if version != generic.Action.sig_version: + raise apx.UnsupportedSignatureVersion(version, sig=self) + # Signature actions don't sign other signature actions. So if + # the action that's doing the signing isn't ourself, return + # nothing. + if str(a) != str(self): + return None + + # It's necessary to sign the action as the client will see it, + # post publication. To do that, it's necessary to simulate the + # publication process on a copy of the action, converting + # paths to hashes and adding size information. 
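# A minimal sketch (hypothetical, shortened hash values) of the attribute
# layout produced by __set_chain_certs_data() above for two intermediate
# certificates; each chain.* attribute is a single space-joined string so
# the ordering of the chain stays stable.  The exact hash attribute name
# ("chain" vs "pkg.chain.<alg>") depends on the preferred hash algorithm.
#
#     chain        = "aa11bb22 cc33dd44"
#     chain.sizes  = "1523 1618"     # uncompressed sizes, same order
#     chain.csizes = "1101 1187"     # compressed sizes, same order
#
# get_chain_certs() later splits such a value back into a list of hashes.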
+ tmp_a = SignatureAction(None, **self.attrs) + # The signature action can't sign the value of the value + # attribute, but it can sign that attribute's name. + tmp_a.attrs["value"] = "" + if hasattr(self.data, "__call__"): + size = int(self.attrs.get("pkg.size", 0)) + tmp_dir = tempfile.mkdtemp() + with self.data() as fh: + hashes, data = misc.get_data_digest( + fh, + size, + return_content=True, + hash_attrs=digest.DEFAULT_HASH_ATTRS, + hash_algs=digest.HASH_ALGS, + ) + tmp_a.attrs.update(hashes) + # "hash" is special since it shouldn't appear in + # the action attributes, it gets set as a member + # instead. + if "hash" in tmp_a.attrs: + tmp_a.hash = tmp_a.attrs["hash"] + del tmp_a.attrs["hash"] + + # The use of self.hash here is just to point to a + # filename, the type of hash used for self.hash is + # irrelevant. Note that our use of self.hash for the + # basename will need to be modified when we finally move + # off SHA-1 hashes. + csize, chashes = misc.compute_compressed_attrs( + os.path.basename(self.hash), self.hash, data, size, tmp_dir + ) + shutil.rmtree(tmp_dir) + tmp_a.attrs["pkg.csize"] = csize + for attr in chashes: + tmp_a.attrs[attr] = chashes[attr] + elif self.hash: + tmp_a.hash = self.hash + for attr in digest.DEFAULT_HASH_ATTRS: + if attr in self.attrs: + tmp_a.attrs[attr] = self.attrs[attr] + + csizes = [] + chain_hashes = {} + chain_chashes = {} + for attr in digest.DEFAULT_CHAIN_ATTRS: + chain_hashes[attr] = [] + for attr in digest.DEFAULT_CHAIN_CHASH_ATTRS: + chain_chashes[attr] = [] + + sizes = self.attrs.get("chain.sizes", "").split() + for i, c in enumerate(self.chain_cert_openers): + size = int(sizes[i]) + tmp_dir = tempfile.mkdtemp() + hshes, data = misc.get_data_digest( + c(), + size, + return_content=True, + hash_attrs=digest.DEFAULT_CHAIN_ATTRS, + hash_algs=digest.CHAIN_ALGS, + ) + + for attr in hshes: + chain_hashes[attr].append(hshes[attr]) + + csize, chashes = misc.compute_compressed_attrs( + "tmp", + None, + data, + size, + tmp_dir, + chash_attrs=digest.DEFAULT_CHAIN_CHASH_ATTRS, + chash_algs=digest.CHAIN_CHASH_ALGS, + ) + shutil.rmtree(tmp_dir) + csizes.append(csize) + for attr in chashes: + chain_chashes[attr].append(chashes[attr]) + + if chain_hashes: + for attr in digest.DEFAULT_CHAIN_ATTRS: + if chain_hashes[attr]: + tmp_a.attrs[attr] = " ".join(chain_hashes[attr]) + + # Now that tmp_a looks like the post-published action, transform + # it into a string using the generic sig_str method. + return generic.Action.sig_str(tmp_a, tmp_a, version) + + def actions_to_str(self, acts, version): + """Transforms a collection of actions into a string that is + used to sign those actions.""" + + # If a is None, then the action was another signature action so + # discard it from the information to be signed. + return "\n".join( + sorted( + ( + a + for a in (b.sig_str(self, version) for b in acts) + if a is not None + ) + ) + ) + + def retrieve_chain_certs(self, pub): + """Retrieve the chain certificates needed to validate this + signature.""" + + chain_attr, chain_val, hash_func = digest.get_least_preferred_hash( + self, hash_type=digest.CHAIN + ) + # We may not have any chain certs for this signature + if not chain_val: + return + for c in chain_val.split(): + pub.get_cert_by_hash(c, only_retrieve=True, hash_func=hash_func) + + def get_chain_certs(self, least_preferred=False): + """Return a list of the chain certificates needed to validate + this signature. 
When retrieving the content from the + repository, we use the "least preferred" hash for backwards + compatibility, but when verifying the content, we use the + "most preferred" hash.""" + + if least_preferred: + chain_attr, chain_val, hash_func = digest.get_least_preferred_hash( + self, hash_type=digest.CHAIN + ) + else: + chain_attr, chain_val, hash_func = digest.get_preferred_hash( + self, hash_type=digest.CHAIN + ) + if not chain_val: + return [] + return chain_val.split() + + def get_chain_certs_chashes(self, least_preferred=False): + """Return a list of the chain certificates needed to validate + this signature.""" + + if least_preferred: + ( + chain_chash_attr, + chain_chash_val, + hash_func, + ) = digest.get_least_preferred_hash( + self, hash_type=digest.CHAIN_CHASH + ) + else: + ( + chain_chash_attr, + chain_chash_val, + hash_func, + ) = digest.get_preferred_hash(self, hash_type=digest.CHAIN_CHASH) + if not chain_chash_val: + return [] + return chain_chash_val.split() + + def is_signed(self): + """Returns True if this action is signed using a key, instead + of simply being a hash. Since variant tagged signature + actions are not handled yet, it also returns False in that + case.""" + + return self.hash is not None and not self.get_variant_template() + + @staticmethod + def decompose_sig_alg(val): + """Split the sig_alg attribute up in to something useful.""" + + for s in valid_sig_algs: + for h in valid_hash_algs: + t = "{0}-{1}".format(s, h) + if val == t: + return s, h + for h in valid_hash_algs: + if h == val: + return None, h + return None, None + + def verify_sig( + self, acts, pub, trust_anchors, use_crls, required_names=None + ): + """Try to verify this signature. It can return True or + None. None means we didn't know how to verify this signature. + If we do know how to verify the signature but it doesn't verify, + then an exception is raised. + + The 'acts' parameter is the iterable of actions against which + to verify the signature. + + The 'pub' parameter is the publisher that published the + package this action signed. + + The 'trust_anchors' parameter contains the trust anchors to use + when verifying the signature. + + The 'required_names' parameter is a set of strings that must + be seen as a CN in the chain of trust for the certificate.""" + + ver = int(self.attrs["version"]) + # If this signature is tagged with variants, if the version is + # higher than one we know about, or it uses an unrecognized + # hash algorithm, we can't handle it yet. + if ( + self.get_variant_template() + or ver > generic.Action.sig_version + or not self.hash_alg + ): + return None + # Turning this into a list makes debugging vastly more + # tractable. + acts = list(acts) + # If self.hash is None, then the signature is storing a hash + # of the actions, not a signed value. + if self.hash is None: + assert self.sig_alg is None + h = hashlib.new(self.hash_alg) + h.update(misc.force_bytes(self.actions_to_str(acts, ver))) + computed_hash = h.digest() + # The attrs value is stored in hex so that it's easy + # to read. + if misc.hex_to_binary(self.attrs["value"]) != computed_hash: + raise apx.UnverifiedSignature( + self, + _( + "The signature value did not match the " + "expected value. action: {0}" + ).format(self), + ) + return True + # Verify a signature that's not just a hash. + if self.sig_alg is None: + return None + # Get the certificate paired with the key which signed this + # action. 
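# A minimal sketch of decompose_sig_alg() above, using the valid_sig_algs /
# valid_hash_algs tables (the example inputs are hypothetical):
#
#     SignatureAction.decompose_sig_alg("rsa-sha256")  ->  ("rsa", "sha256")
#     SignatureAction.decompose_sig_alg("sha512")      ->  (None, "sha512")
#     SignatureAction.decompose_sig_alg("dsa-md5")     ->  (None, None)
#
# When sig_alg is None, verify_sig() above treats the action as a plain
# hash: it hashes the canonical action string and compares the digest with
# the hex-encoded "value" attribute instead of checking an RSA signature.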
+ attr, hash_val, hash_func = digest.get_least_preferred_hash(self) + cert = pub.get_cert_by_hash( + hash_val, verify_hash=True, hash_func=hash_func + ) + # Make sure that the intermediate certificates that are needed + # to validate this signature are present. + self.retrieve_chain_certs(pub) + try: + # This import is placed here to break a circular + # import seen when merge.py is used. + from pkg.client.publisher import CODE_SIGNING_USE + + # Verify the certificate whose key created this + # signature action. + pub.verify_chain( + cert, + trust_anchors, + 0, + use_crls, + required_names=required_names, + usages=CODE_SIGNING_USE, + ) + except apx.SigningException as e: + e.act = self + raise + # Check that the certificate verifies against this signature. + pub_key = cert.public_key() + hhash = self.__get_hash_by_name(self.hash_alg) + try: + pub_key.verify( + misc.hex_to_binary(self.attrs["value"]), + misc.force_bytes(self.actions_to_str(acts, ver)), + padding.PKCS1v15(), + hhash(), + ) + except InvalidSignature: + raise apx.UnverifiedSignature( + self, + _("The signature value did not match the expected " "value."), + ) + + return True + + def set_signature( + self, acts, key_path=None, chain_paths=misc.EmptyI, chash_dir=None + ): + """Sets the signature value for this action. + + The 'acts' parameter is the iterable of actions this action + should sign. + + The 'key_path' parameter is the path to the file containing the + private key which is used to sign the actions. + + The 'chain_paths' parameter is an iterable of paths to + certificates which are needed to form the chain of trust from + the certificate associated with the key in 'key_path' to one of + the CAs for the publisher of the actions. + + The 'chash_dir' parameter is the temporary directory to use + while calculating the compressed hashes for chain certs.""" + + # Turning this into a list makes debugging vastly more + # tractable. + acts = list(acts) + + # If key_path is None, then set value to be the hash + # of the actions. + if key_path is None: + # If no private key is set, then no certificate should + # have been given. + assert self.data is None + h = hashlib.new(self.hash_alg) + h.update( + misc.force_bytes( + self.actions_to_str(acts, generic.Action.sig_version) + ) + ) + self.attrs["value"] = h.hexdigest() + else: + # If a private key is used, then the certificate it's + # paired with must be provided. + assert self.data is not None + self.__set_chain_certs_data(chain_paths, chash_dir) + + try: + with open(key_path, "rb") as f: + priv_key = serialization.load_pem_private_key( + f.read(), password=None, backend=default_backend() + ) + except ValueError: + raise apx.BadFileFormat( + _( + "{0} was expected to " + "be a RSA key but could not be read " + "correctly." + ).format(key_path) + ) + + hhash = self.__get_hash_by_name(self.hash_alg) + self.attrs["value"] = misc.binary_to_hex( + priv_key.sign( + misc.force_bytes( + self.actions_to_str(acts, generic.Action.sig_version) + ), + padding.PKCS1v15(), + hhash(), + ) + ) + + def generate_indices(self): + """Generates the indices needed by the search dictionary. 
See + generic.py for a more detailed explanation.""" + + res = [] + if self.hash is not None: + res.append((self.name, "certificate", self.hash, self.hash)) + res.append( + ( + self.name, + "algorithm", + self.attrs["algorithm"], + self.attrs["algorithm"], + ) + ) + res.append( + (self.name, "signature", self.attrs["value"], self.attrs["value"]) + ) + for attr in digest.DEFAULT_HASH_ATTRS: + # We already have an index entry for self.hash; + # we only want hash attributes other than "hash". + hash = self.attrs.get(attr) + if attr != "hash" and hash is not None: + res.append((self.name, attr, hash, None)) + return res + + def identical(self, other, hsh): + """Check whether another action is identical to this + signature.""" + # Only signature actions can be identical to other signature + # actions. + if self.name != other.name: + return False + # If the code signing certs are identical, the more checking is + # needed. + # Determine if we share any hash attribute values with the other + # action. + matching_hash_attrs = set() + for attr in digest.DEFAULT_HASH_ATTRS: + if attr == "hash": + # we deal with the 'hash' member later + continue + if ( + attr in self.attrs + and attr in other.attrs + and self.attrs[attr] == other.attrs[attr] + and self.assrs[attr] + ): + matching_hash_attrs.add(attr) + if hsh and hsh == other.attrs.get(attr): + # Technically 'hsh' isn't a hash attr, it's + # a hash attr value, but that's enough for us + # to consider it as potentially identical. + matching_hash_attrs.add(hsh) + + if hsh == other.hash or self.hash == other.hash or matching_hash_attrs: + # If the algorithms are using different algorithms or + # have different versions, then they're not identical. + if ( + self.attrs["algorithm"] != other.attrs["algorithm"] + or self.attrs["version"] != other.attrs["version"] + ): return False + # If the values are the same, then they're identical. + if self.attrs["value"] == other.attrs["value"]: + return True + raise apx.AlmostIdentical( + hsh, self.attrs["algorithm"], self.attrs["version"] + ) + return False + + def validate(self, fmri=None): + """Performs additional validation of action attributes that + for performance or other reasons cannot or should not be done + during Action object creation. An ActionError exception (or + subclass of) will be raised if any attributes are not valid. + This is primarily intended for use during publication or during + error handling to provide additional diagonostics. + + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action. + """ + + # 'value' can only be required at publication time since signing + # relies on the ability to construct actions without one despite + # the fact that it is the key attribute. + generic.Action._validate( + self, + fmri=fmri, + numeric_attrs=("pkg.csize", "pkg.size"), + required_attrs=("value",), + single_attrs=("algorithm", "chash", "value"), + ) - def validate(self, fmri=None): - """Performs additional validation of action attributes that - for performance or other reasons cannot or should not be done - during Action object creation. An ActionError exception (or - subclass of) will be raised if any attributes are not valid. - This is primarily intended for use during publication or during - error handling to provide additional diagonostics. - - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action. 
- """ - - # 'value' can only be required at publication time since signing - # relies on the ability to construct actions without one despite - # the fact that it is the key attribute. - generic.Action._validate(self, fmri=fmri, - numeric_attrs=("pkg.csize", "pkg.size"), - required_attrs=("value",), single_attrs=("algorithm", - "chash", "value")) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/unknown.py b/src/modules/actions/unknown.py index 8a33ee7e3..b27babef9 100644 --- a/src/modules/actions/unknown.py +++ b/src/modules/actions/unknown.py @@ -33,13 +33,15 @@ from . import generic + class UnknownAction(generic.Action): - """Class representing a unknown type of packaging object.""" + """Class representing a unknown type of packaging object.""" + + __slots__ = [] - __slots__ = [] + name = "unknown" + ordinality = generic._orderdict[name] - name = "unknown" - ordinality = generic._orderdict[name] # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/actions/user.py b/src/modules/actions/user.py index 38b4d6846..57bf55761 100644 --- a/src/modules/actions/user.py +++ b/src/modules/actions/user.py @@ -31,347 +31,386 @@ a new user.""" from . import generic + try: - from pkg.cfgfiles import * - have_cfgfiles = True + from pkg.cfgfiles import * + + have_cfgfiles = True except ImportError: - have_cfgfiles = False + have_cfgfiles = False import pkg.client.api_errors as apx import pkg.actions + class UserAction(generic.Action): - """Class representing a user packaging object.""" - - __slots__ = [] - - name = "user" - key_attr = "username" - globally_identical = True - ordinality = generic._orderdict[name] - - # if these values are different on disk than in action - # prefer on-disk version for actual login accounts (root) - use_existing_attrs = [ "password", "lastchg", "min", - "max", "expire", "flag", - "warn", "inactive"] - mutable_passwords = frozenset(("UP", "")) - - def as_set(self, item): - if isinstance(item, list): - return set(item) - return set([item]) - - def merge(self, old_plan, on_disk): - """ three way attribute merge between old manifest, - what's on disk and new manifest. For any values - on disk that are not in the new plan, use the values - on disk. Use new plan values unless attribute is - in self.use_existing_attrs, or if old manifest and - on-disk copy match....""" - - out = self.attrs.copy() - - for attr in on_disk: - if (attr in out and - attr not in self.use_existing_attrs) or \ - (attr in old_plan and - old_plan[attr] == on_disk[attr]): - continue - - # preserve UID if not specified explicitly - if attr == "uid" and attr not in out: - out[attr] = on_disk[attr] - continue - - # prefer manifest version if not mutable password - if attr == "password" and \ - out[attr] not in self.mutable_passwords: - continue - - # Only prefer on-disk entries if password is - # user-settable (e.g. '' or UP). - if "password" not in out or out["password"] not in \ - self.mutable_passwords: - continue - - if attr != "group-list": - out[attr] = on_disk[attr] - else: - out[attr] = list( - self.as_set(out.get(attr, [])) | - self.as_set(on_disk[attr])) - return out - - def readstate(self, image, username, lock=False): - """read state of user from files. 
May raise KeyError""" - root = image.get_root() - pw = PasswordFile(root, lock) - gr = GroupFile(image) - ftp = FtpusersFile(root) - - username = self.attrs["username"] - - cur_attrs = pw.getuser(username) - if "gid" in cur_attrs: - cur_attrs["group"] = \ - image.get_name_by_gid(int(cur_attrs["gid"])) - - grps = gr.getgroups(username) - if grps: - cur_attrs["group-list"] = grps - - cur_attrs["ftpuser"] = str(ftp.getuser(username)).lower() - - return (pw, gr, ftp, cur_attrs) - - - def install(self, pkgplan, orig, retry=False): - """client-side method that adds the user... - update any attrs that changed from orig - unless the on-disk stuff was changed""" - - if not have_cfgfiles: - # The user action is ignored if cfgfiles is not - # available. - return - - username = self.attrs["username"] - - try: - pw, gr, ftp, cur_attrs = \ - self.readstate(pkgplan.image, username, lock=True) - - self.attrs["gid"] = str(pkgplan.image.get_group_by_name( - self.attrs["group"])) - - orig_attrs = {} - default_attrs = pw.getdefaultvalues() - if orig: - # Grab default values from files, extend by - # specifics from original manifest for - # comparisons sake. - orig_attrs.update(default_attrs) - orig_attrs["group-list"] = [] - orig_attrs["ftpuser"] = "true" - orig_attrs.update(orig.attrs) - else: - # If we're installing a user for the first time, - # we want to override whatever value might be - # represented by the presence or absence of the - # user in the ftpusers file. Remove the value - # from the representation of the file so that - # the new value takes precedence in the merge. - del cur_attrs["ftpuser"] - - # add default values to new attrs if not present - for attr in default_attrs: - if attr not in self.attrs: - self.attrs[attr] = default_attrs[attr] - - self.attrs["group-list"] = self.attrlist("group-list") - final_attrs = self.merge(orig_attrs, cur_attrs) - - pw.setvalue(final_attrs) - - if "group-list" in final_attrs: - gr.setgroups(username, - final_attrs["group-list"]) - - ftp.setuser(username, - final_attrs.get("ftpuser", "true") == "true") - - pw.writefile() - gr.writefile() - ftp.writefile() - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise - # If we're in the postinstall phase and the files - # *still* aren't there, bail gracefully. - if retry: - txt = _("User cannot be installed without user " - "database files present.") - raise apx.ActionExecutionError(self, error=e, - details=txt, fmri=pkgplan.destination_fmri) - img = pkgplan.image - img._users.add(self) - if "uid" in self.attrs: - img._usersbyname[self.attrs["username"]] = \ - int(self.attrs["uid"]) - raise pkg.actions.ActionRetry(self) - except KeyError as e: - # cannot find group - self.validate() # should raise error if no group in action - txt = _("{group} is an unknown or invalid group").format( - group=self.attrs.get("group", "None")) - raise apx.ActionExecutionError(self, - details=txt, fmri=pkgplan.destination_fmri) - - finally: - if "pw" in locals(): - pw.unlock() - - def retry(self, pkgplan, orig): - users = pkgplan.image._users - if users: - assert self in users - self.install(pkgplan, orig, retry=True) - - def verify(self, img, **args): - """Returns a tuple of lists of the form (errors, warnings, - info). The error list will be empty if the action has been - correctly installed in the given image.""" - - errors = [] - warnings = [] - info = [] - - if not have_cfgfiles: - # The user action is ignored if cfgfiles is not - # available. 
- return errors, warnings, info - - username = self.attrs["username"] - - try: - pw, gr, ftp, cur_attrs = self.readstate(img, username) - except EnvironmentError as e: - if e.errno == errno.EACCES: - errors.append(_("Skipping: Permission denied")) - else: - errors.append(_("Unexpected Error: {0}").format(e)) - return errors, warnings, info - except KeyError as e: - errors.append(_("{group} is an unknown or invalid group").format( - group=self.attrs.get("group", "None"))) - return errors, warnings, info - - if "group-list" in self.attrs: - self.attrs["group-list"] = \ - sorted(self.attrlist("group-list")) - - # Get the default values if they're non-empty - pwdefval = dict(( - (k, v) - for k, v in six.iteritems(pw.getdefaultvalues()) - if v != "" - )) - - # Certain defaults are dynamic, so we need to ignore what's on - # disk - if "gid" not in self.attrs: - cur_attrs["gid"] = "" - if "uid" not in self.attrs: - cur_attrs["uid"] = "" - if "lastchg" not in self.attrs: - cur_attrs["lastchg"] = "" - if "login-shell" not in self.attrs: - cur_attrs["login-shell"] = "" - - pwdefval["ftpuser"] = "true" - should_be = pwdefval.copy() - should_be.update(self.attrs) - - # ignore changes in certain fields if password is - # mutable; this indicates that this account is used - # by a human and logins, timeouts, etc. are changable. - if should_be["password"] in self.mutable_passwords: - for attr in self.use_existing_attrs: - if attr in should_be: - cur_attrs[attr] = should_be[attr] - else: - if attr in cur_attrs: - del cur_attrs[attr] - - if "shell-change-ok" in self.attrs: - del should_be["shell-change-ok"] - if self.attrs["shell-change-ok"].lower() == "true": - cur_attrs["login-shell"] = should_be["login-shell"] - - # always ignore flag - if "flag" in cur_attrs: - del cur_attrs["flag"] - # Note where attributes are missing - for k in should_be: - cur_attrs.setdefault(k, "") - # Note where attributes should be empty - for k in cur_attrs: - if cur_attrs[k]: - should_be.setdefault(k, "") - - errors.extend( - _("{entry}: '{found}' should be '{expected}'").format( - entry=a, found=cur_attrs[a], - expected=should_be[a]) - for a in should_be - if cur_attrs[a] != should_be[a] + """Class representing a user packaging object.""" + + __slots__ = [] + + name = "user" + key_attr = "username" + globally_identical = True + ordinality = generic._orderdict[name] + + # if these values are different on disk than in action + # prefer on-disk version for actual login accounts (root) + use_existing_attrs = [ + "password", + "lastchg", + "min", + "max", + "expire", + "flag", + "warn", + "inactive", + ] + mutable_passwords = frozenset(("UP", "")) + + def as_set(self, item): + if isinstance(item, list): + return set(item) + return set([item]) + + def merge(self, old_plan, on_disk): + """three way attribute merge between old manifest, + what's on disk and new manifest. For any values + on disk that are not in the new plan, use the values + on disk. 
Use new plan values unless attribute is + in self.use_existing_attrs, or if old manifest and + on-disk copy match....""" + + out = self.attrs.copy() + + for attr in on_disk: + if (attr in out and attr not in self.use_existing_attrs) or ( + attr in old_plan and old_plan[attr] == on_disk[attr] + ): + continue + + # preserve UID if not specified explicitly + if attr == "uid" and attr not in out: + out[attr] = on_disk[attr] + continue + + # prefer manifest version if not mutable password + if attr == "password" and out[attr] not in self.mutable_passwords: + continue + + # Only prefer on-disk entries if password is + # user-settable (e.g. '' or UP). + if ( + "password" not in out + or out["password"] not in self.mutable_passwords + ): + continue + + if attr != "group-list": + out[attr] = on_disk[attr] + else: + out[attr] = list( + self.as_set(out.get(attr, [])) | self.as_set(on_disk[attr]) + ) + return out + + def readstate(self, image, username, lock=False): + """read state of user from files. May raise KeyError""" + root = image.get_root() + pw = PasswordFile(root, lock) + gr = GroupFile(image) + ftp = FtpusersFile(root) + + username = self.attrs["username"] + + cur_attrs = pw.getuser(username) + if "gid" in cur_attrs: + cur_attrs["group"] = image.get_name_by_gid(int(cur_attrs["gid"])) + + grps = gr.getgroups(username) + if grps: + cur_attrs["group-list"] = grps + + cur_attrs["ftpuser"] = str(ftp.getuser(username)).lower() + + return (pw, gr, ftp, cur_attrs) + + def install(self, pkgplan, orig, retry=False): + """client-side method that adds the user... + update any attrs that changed from orig + unless the on-disk stuff was changed""" + + if not have_cfgfiles: + # The user action is ignored if cfgfiles is not + # available. + return + + username = self.attrs["username"] + + try: + pw, gr, ftp, cur_attrs = self.readstate( + pkgplan.image, username, lock=True + ) + + self.attrs["gid"] = str( + pkgplan.image.get_group_by_name(self.attrs["group"]) + ) + + orig_attrs = {} + default_attrs = pw.getdefaultvalues() + if orig: + # Grab default values from files, extend by + # specifics from original manifest for + # comparisons sake. + orig_attrs.update(default_attrs) + orig_attrs["group-list"] = [] + orig_attrs["ftpuser"] = "true" + orig_attrs.update(orig.attrs) + else: + # If we're installing a user for the first time, + # we want to override whatever value might be + # represented by the presence or absence of the + # user in the ftpusers file. Remove the value + # from the representation of the file so that + # the new value takes precedence in the merge. + del cur_attrs["ftpuser"] + + # add default values to new attrs if not present + for attr in default_attrs: + if attr not in self.attrs: + self.attrs[attr] = default_attrs[attr] + + self.attrs["group-list"] = self.attrlist("group-list") + final_attrs = self.merge(orig_attrs, cur_attrs) + + pw.setvalue(final_attrs) + + if "group-list" in final_attrs: + gr.setgroups(username, final_attrs["group-list"]) + + ftp.setuser(username, final_attrs.get("ftpuser", "true") == "true") + + pw.writefile() + gr.writefile() + ftp.writefile() + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + # If we're in the postinstall phase and the files + # *still* aren't there, bail gracefully. + if retry: + txt = _( + "User cannot be installed without user " + "database files present." 
) - return errors, warnings, info - - def remove(self, pkgplan): - """client-side method that removes this user""" - if not have_cfgfiles: - # The user action is ignored if cfgfiles is not - # available. - return - - root = pkgplan.image.get_root() - pw = PasswordFile(root, lock=True) - try: - gr = GroupFile(pkgplan.image) - ftp = FtpusersFile(root) - - pw.removevalue(self.attrs) - gr.removeuser(self.attrs["username"]) - - # negative logic - ftp.setuser(self.attrs["username"], True) - - pw.writefile() - gr.writefile() - ftp.writefile() - except KeyError as e: - # Already gone; don't care. - if e.args[0] != (self.attrs["username"],): - raise - finally: - pw.unlock() - - def generate_indices(self): - """Generates the indices needed by the search dictionary. See - generic.py for a more detailed explanation.""" - - return [("user", "name", self.attrs["username"], None)] - - def validate(self, fmri=None): - """Performs additional validation of action attributes that - for performance or other reasons cannot or should not be done - during Action object creation. An ActionError exception (or - subclass of) will be raised if any attributes are not valid. - This is primarily intended for use during publication or during - error handling to provide additional diagonostics. - - 'fmri' is an optional package FMRI (object or string) indicating - what package contained this action. - """ - - generic.Action._validate(self, fmri=fmri, - numeric_attrs=("uid", "lastchg", "min", "max", "warn", - "inactive","expire", "flag"), single_attrs=("password", - "uid", "group", "gcos-field", "home-dir", "login-shell", - "ftpuser", "lastchg", "min", "max", "warn", "inactive", - "expire", "flag"), - required_attrs=("group",)) - - def compare(self, other): - """Arrange for user actions to be installed in uid order. This - will only hold true for actions installed at one time, but that's - generally what we need on initial install.""" - # put unspecified uids at the end - a = int(self.attrs.get("uid", 1024)) - b = int(other.attrs.get("uid", 1024)) - return (a > b) - (a < b) + raise apx.ActionExecutionError( + self, error=e, details=txt, fmri=pkgplan.destination_fmri + ) + img = pkgplan.image + img._users.add(self) + if "uid" in self.attrs: + img._usersbyname[self.attrs["username"]] = int( + self.attrs["uid"] + ) + raise pkg.actions.ActionRetry(self) + except KeyError as e: + # cannot find group + self.validate() # should raise error if no group in action + txt = _("{group} is an unknown or invalid group").format( + group=self.attrs.get("group", "None") + ) + raise apx.ActionExecutionError( + self, details=txt, fmri=pkgplan.destination_fmri + ) + + finally: + if "pw" in locals(): + pw.unlock() + + def retry(self, pkgplan, orig): + users = pkgplan.image._users + if users: + assert self in users + self.install(pkgplan, orig, retry=True) + + def verify(self, img, **args): + """Returns a tuple of lists of the form (errors, warnings, + info). The error list will be empty if the action has been + correctly installed in the given image.""" + + errors = [] + warnings = [] + info = [] + + if not have_cfgfiles: + # The user action is ignored if cfgfiles is not + # available. 
+ return errors, warnings, info + + username = self.attrs["username"] + + try: + pw, gr, ftp, cur_attrs = self.readstate(img, username) + except EnvironmentError as e: + if e.errno == errno.EACCES: + errors.append(_("Skipping: Permission denied")) + else: + errors.append(_("Unexpected Error: {0}").format(e)) + return errors, warnings, info + except KeyError as e: + errors.append( + _("{group} is an unknown or invalid group").format( + group=self.attrs.get("group", "None") + ) + ) + return errors, warnings, info + + if "group-list" in self.attrs: + self.attrs["group-list"] = sorted(self.attrlist("group-list")) + + # Get the default values if they're non-empty + pwdefval = dict( + ((k, v) for k, v in six.iteritems(pw.getdefaultvalues()) if v != "") + ) + + # Certain defaults are dynamic, so we need to ignore what's on + # disk + if "gid" not in self.attrs: + cur_attrs["gid"] = "" + if "uid" not in self.attrs: + cur_attrs["uid"] = "" + if "lastchg" not in self.attrs: + cur_attrs["lastchg"] = "" + if "login-shell" not in self.attrs: + cur_attrs["login-shell"] = "" + + pwdefval["ftpuser"] = "true" + should_be = pwdefval.copy() + should_be.update(self.attrs) + + # ignore changes in certain fields if password is + # mutable; this indicates that this account is used + # by a human and logins, timeouts, etc. are changable. + if should_be["password"] in self.mutable_passwords: + for attr in self.use_existing_attrs: + if attr in should_be: + cur_attrs[attr] = should_be[attr] + else: + if attr in cur_attrs: + del cur_attrs[attr] + + if "shell-change-ok" in self.attrs: + del should_be["shell-change-ok"] + if self.attrs["shell-change-ok"].lower() == "true": + cur_attrs["login-shell"] = should_be["login-shell"] + + # always ignore flag + if "flag" in cur_attrs: + del cur_attrs["flag"] + # Note where attributes are missing + for k in should_be: + cur_attrs.setdefault(k, "") + # Note where attributes should be empty + for k in cur_attrs: + if cur_attrs[k]: + should_be.setdefault(k, "") + + errors.extend( + _("{entry}: '{found}' should be '{expected}'").format( + entry=a, found=cur_attrs[a], expected=should_be[a] + ) + for a in should_be + if cur_attrs[a] != should_be[a] + ) + return errors, warnings, info + + def remove(self, pkgplan): + """client-side method that removes this user""" + if not have_cfgfiles: + # The user action is ignored if cfgfiles is not + # available. + return + + root = pkgplan.image.get_root() + pw = PasswordFile(root, lock=True) + try: + gr = GroupFile(pkgplan.image) + ftp = FtpusersFile(root) + + pw.removevalue(self.attrs) + gr.removeuser(self.attrs["username"]) + + # negative logic + ftp.setuser(self.attrs["username"], True) + + pw.writefile() + gr.writefile() + ftp.writefile() + except KeyError as e: + # Already gone; don't care. + if e.args[0] != (self.attrs["username"],): + raise + finally: + pw.unlock() + + def generate_indices(self): + """Generates the indices needed by the search dictionary. See + generic.py for a more detailed explanation.""" + + return [("user", "name", self.attrs["username"], None)] + + def validate(self, fmri=None): + """Performs additional validation of action attributes that + for performance or other reasons cannot or should not be done + during Action object creation. An ActionError exception (or + subclass of) will be raised if any attributes are not valid. + This is primarily intended for use during publication or during + error handling to provide additional diagonostics. 
+ + 'fmri' is an optional package FMRI (object or string) indicating + what package contained this action. + """ + + generic.Action._validate( + self, + fmri=fmri, + numeric_attrs=( + "uid", + "lastchg", + "min", + "max", + "warn", + "inactive", + "expire", + "flag", + ), + single_attrs=( + "password", + "uid", + "group", + "gcos-field", + "home-dir", + "login-shell", + "ftpuser", + "lastchg", + "min", + "max", + "warn", + "inactive", + "expire", + "flag", + ), + required_attrs=("group",), + ) + + def compare(self, other): + """Arrange for user actions to be installed in uid order. This + will only hold true for actions installed at one time, but that's + generally what we need on initial install.""" + # put unspecified uids at the end + a = int(self.attrs.get("uid", 1024)) + b = int(other.attrs.get("uid", 1024)) + return (a > b) - (a < b) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/altroot.py b/src/modules/altroot.py index 0dae68cb9..17b1e879a 100644 --- a/src/modules/altroot.py +++ b/src/modules/altroot.py @@ -44,367 +44,379 @@ # has native support for all the *at(2) system calls we use below. import pkg.syscallat as sat + # --------------------------------------------------------------------------- # Misc Functions # def __path_abs_to_relative(path): - """Strip the leading '/' from a path using os.path.split().""" + """Strip the leading '/' from a path using os.path.split().""" + + path_new = None + while True: + (path, tail) = os.path.split(path) + if not tail: + break + if path_new: + path_new = os.path.join(tail, path_new) + else: + path_new = tail + return path_new - path_new = None - while True: - (path, tail) = os.path.split(path) - if not tail: - break - if path_new: - path_new = os.path.join(tail, path_new) - else: - path_new = tail - return path_new def __fd_to_path(fd): - """Given a file descriptor return the path to that file descriptor.""" + """Given a file descriptor return the path to that file descriptor.""" + + path = "/proc/{0:d}/path/{1:d}".format(os.getpid(), fd) + return os.readlink(path) - path = "/proc/{0:d}/path/{1:d}".format(os.getpid(), fd) - return os.readlink(path) # --------------------------------------------------------------------------- # Functions for accessing files in an alternate image # -def ar_open(root, path, flags, - mode=None, create=False, truncate=False): - """A function similar to os.open() that ensures that the path - we're accessing resides within a specified directory subtree. - - 'root' is a directory that path must reside in. - - 'path' is a path that is interpreted relative to 'root'. i.e., 'root' - is prepended to path. 'path' can not contain any symbolic links that - would cause an access to be redirected outside of 'root'. If this - happens we'll raise an OSError exception with errno set to EREMOTE - - 'mode' optional permissions mask used if we create 'path' - - 'create' optional flag indicating if we should create 'path' - - 'truncate' optional flag indicating if we should truncate 'path' after - opening it.""" - - # all paths must be absolute - assert os.path.isabs(root) - - # only allow read/write flags - assert (flags & ~(os.O_WRONLY|os.O_RDONLY)) == 0 - - # we can't truncate a file unless we open it for writing - assert not truncate or (flags & os.O_WRONLY) - - # if create is true the user must supply a mode mask - assert not create or mode != None - - # we're going to update root and path so prepare an error - # message with the existing values now. 
- eremote = _("Path outside alternate root: root={root}, " - "path={path}").format(root=root, path=path) - - # make target into a relative path - if os.path.isabs(path): - path = __path_abs_to_relative(path) - - # now open the alternate root and get its path - # done to eliminate any links/mounts/etc in the path - root_fd = os.open(root, os.O_RDONLY) +def ar_open(root, path, flags, mode=None, create=False, truncate=False): + """A function similar to os.open() that ensures that the path + we're accessing resides within a specified directory subtree. + + 'root' is a directory that path must reside in. + + 'path' is a path that is interpreted relative to 'root'. i.e., 'root' + is prepended to path. 'path' can not contain any symbolic links that + would cause an access to be redirected outside of 'root'. If this + happens we'll raise an OSError exception with errno set to EREMOTE + + 'mode' optional permissions mask used if we create 'path' + + 'create' optional flag indicating if we should create 'path' + + 'truncate' optional flag indicating if we should truncate 'path' after + opening it.""" + + # all paths must be absolute + assert os.path.isabs(root) + + # only allow read/write flags + assert (flags & ~(os.O_WRONLY | os.O_RDONLY)) == 0 + + # we can't truncate a file unless we open it for writing + assert not truncate or (flags & os.O_WRONLY) + + # if create is true the user must supply a mode mask + assert not create or mode != None + + # we're going to update root and path so prepare an error + # message with the existing values now. + eremote = _( + "Path outside alternate root: root={root}, " "path={path}" + ).format(root=root, path=path) + + # make target into a relative path + if os.path.isabs(path): + path = __path_abs_to_relative(path) + + # now open the alternate root and get its path + # done to eliminate any links/mounts/etc in the path + root_fd = os.open(root, os.O_RDONLY) + try: + root = __fd_to_path(root_fd) + except OSError as e: + if e.errno != errno.ENOENT: + os.close(root_fd) + raise e + os.close(root_fd) + + # now open the target file, get its path, and make sure it + # lives in the alternate root + path_fd = None + try: + path_tmp = os.path.join(root, path) + path_fd = os.open(path_tmp, flags) + except OSError as e: + if e.errno != errno.ENOENT or not create: + raise e + + assert path_fd or create + if not path_fd: + # the file doesn't exist so we should try to create it. + # we'll do this by first opening the directory which + # will contain the file and then using openat within + # that directory. + path_dir = os.path.dirname(path) + path_file = os.path.basename(path) try: - root = __fd_to_path(root_fd) + path_dir_fd = ar_open(root, path_dir, os.O_RDONLY) except OSError as e: - if e.errno != errno.ENOENT: - os.close(root_fd) - raise e - os.close(root_fd) - - # now open the target file, get its path, and make sure it - # lives in the alternate root - path_fd = None + if e.errno != errno.EREMOTE: + raise e + raise OSError(errno.EREMOTE, eremote) + + # we opened the directory, now create the file try: - path_tmp = os.path.join(root, path) - path_fd = os.open(path_tmp, flags) + path_fd = sat.openat( + path_dir_fd, path_file, flags | os.O_CREAT | os.O_EXCL, mode + ) except OSError as e: - if e.errno != errno.ENOENT or not create: - raise e - - assert path_fd or create - if not path_fd: - # the file doesn't exist so we should try to create it. - # we'll do this by first opening the directory which - # will contain the file and then using openat within - # that directory. 
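
# A minimal usage sketch (editor's illustration, not part of the change being
# made here): per the ar_open() docstring above, the open is confined to the
# given root directory and an escape via symlinks surfaces as OSError with
# errno set to EREMOTE.  The root and path below are hypothetical.
import errno
import os

from pkg.altroot import ar_open

try:
    fd = ar_open("/mnt/altroot", "/etc/release", os.O_RDONLY)
except OSError as e:
    if e.errno == errno.EREMOTE:
        # a symlink redirected the lookup outside /mnt/altroot
        print("path escaped the alternate root")
    raise
else:
    print(os.read(fd, 64))
    os.close(fd)
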
- path_dir = os.path.dirname(path) - path_file = os.path.basename(path) - try: - path_dir_fd = \ - ar_open(root, path_dir, os.O_RDONLY) - except OSError as e: - if e.errno != errno.EREMOTE: - raise e - raise OSError(errno.EREMOTE, eremote) - - # we opened the directory, now create the file - try: - path_fd = sat.openat(path_dir_fd, path_file, - flags|os.O_CREAT|os.O_EXCL, mode) - except OSError as e: - os.close(path_dir_fd) - raise e - - # we created the file - assert path_fd - os.close(path_dir_fd) - - # verify that the file we opened lives in the alternate root + os.close(path_dir_fd) + raise e + + # we created the file + assert path_fd + os.close(path_dir_fd) + + # verify that the file we opened lives in the alternate root + try: + path = __fd_to_path(path_fd) + except OSError as e: + if e.errno != errno.ENOENT: + os.close(path_fd) + raise e + path = os.path.join(root, path) + + if not path.startswith(root): + os.close(path_fd) + raise OSError(errno.EREMOTE, eremote) + + if truncate: + # the user wanted us to truncate the file try: - path = __fd_to_path(path_fd) + os.ftruncate(path_fd, 0) except OSError as e: - if e.errno != errno.ENOENT: - os.close(path_fd) - raise e - path = os.path.join(root, path) - - if not path.startswith(root): - os.close(path_fd) - raise OSError(errno.EREMOTE, eremote) + os.close(path_fd) + raise e - if truncate: - # the user wanted us to truncate the file - try: - os.ftruncate(path_fd, 0) - except OSError as e: - os.close(path_fd) - raise e + return path_fd - return path_fd def ar_unlink(root, path, noent_ok=False): - """A function similar to os.unlink() that ensures that the path - we're accessing resides within a specified directory subtree. + """A function similar to os.unlink() that ensures that the path + we're accessing resides within a specified directory subtree. - 'noent_ok' optional flag indicating if it's ok for 'path' to be - missing. + 'noent_ok' optional flag indicating if it's ok for 'path' to be + missing. - For all other parameters, refer to the 'ar_open' function - for an explanation of their usage and effects.""" + For all other parameters, refer to the 'ar_open' function + for an explanation of their usage and effects.""" - # all paths must be absolute - assert os.path.isabs(root) + # all paths must be absolute + assert os.path.isabs(root) - # make target into a relative path - if os.path.isabs(path): - path = __path_abs_to_relative(path) + # make target into a relative path + if os.path.isabs(path): + path = __path_abs_to_relative(path) - path_dir = os.path.dirname(path) - path_file = os.path.basename(path) - - try: - path_dir_fd = ar_open(root, path_dir, os.O_RDONLY) - except OSError as e: - if noent_ok and e.errno == errno.ENOENT: - return - raise e - - try: - sat.unlinkat(path_dir_fd, path_file, 0) - except OSError as e: - os.close(path_dir_fd) - if noent_ok and e.errno == errno.ENOENT: - return - raise e + path_dir = os.path.dirname(path) + path_file = os.path.basename(path) + try: + path_dir_fd = ar_open(root, path_dir, os.O_RDONLY) + except OSError as e: + if noent_ok and e.errno == errno.ENOENT: + return + raise e + + try: + sat.unlinkat(path_dir_fd, path_file, 0) + except OSError as e: os.close(path_dir_fd) - return - -def ar_rename(root, src, dst): - """A function similar to os.rename() that ensures that the path - we're accessing resides within a specified directory subtree. - - 'src' and 'dst' are paths that are interpreted relative to 'root'. - i.e., 'root' is prepended to both. 
'src' and 'dst' can not contain - any symbolic links that would cause an access to be redirected outside - of 'root'. If this happens we'll raise an OSError exception with - errno set to EREMOTE + if noent_ok and e.errno == errno.ENOENT: + return + raise e - For all other parameters, refer to the 'ar_open' function - for an explanation of their usage and effects.""" + os.close(path_dir_fd) + return - # all paths must be absolute - assert os.path.isabs(root) - # make target into a relative path - if os.path.isabs(src): - src = __path_abs_to_relative(src) - if os.path.isabs(dst): - dst = __path_abs_to_relative(dst) - - src_dir = os.path.dirname(src) - src_file = os.path.basename(src) - dst_dir = os.path.dirname(dst) - dst_file = os.path.basename(dst) - - src_dir_fd = ar_open(root, src_dir, os.O_RDONLY) - try: - dst_dir_fd = ar_open(root, dst_dir, os.O_RDONLY) - except OSError as e: - os.close(src_dir_fd) - raise e - - try: - sat.renameat(src_dir_fd, src_file, dst_dir_fd, dst_file) - except OSError as e: - os.close(src_dir_fd) - os.close(dst_dir_fd) - raise e +def ar_rename(root, src, dst): + """A function similar to os.rename() that ensures that the path + we're accessing resides within a specified directory subtree. + + 'src' and 'dst' are paths that are interpreted relative to 'root'. + i.e., 'root' is prepended to both. 'src' and 'dst' can not contain + any symbolic links that would cause an access to be redirected outside + of 'root'. If this happens we'll raise an OSError exception with + errno set to EREMOTE + + For all other parameters, refer to the 'ar_open' function + for an explanation of their usage and effects.""" + + # all paths must be absolute + assert os.path.isabs(root) + + # make target into a relative path + if os.path.isabs(src): + src = __path_abs_to_relative(src) + if os.path.isabs(dst): + dst = __path_abs_to_relative(dst) + + src_dir = os.path.dirname(src) + src_file = os.path.basename(src) + dst_dir = os.path.dirname(dst) + dst_file = os.path.basename(dst) + + src_dir_fd = ar_open(root, src_dir, os.O_RDONLY) + try: + dst_dir_fd = ar_open(root, dst_dir, os.O_RDONLY) + except OSError as e: + os.close(src_dir_fd) + raise e + try: + sat.renameat(src_dir_fd, src_file, dst_dir_fd, dst_file) + except OSError as e: os.close(src_dir_fd) os.close(dst_dir_fd) - return + raise e -def ar_mkdir(root, path, mode, exists_is_ok=False): - """A function similar to os.mkdir() that ensures that the path we're - opening resides within a specified directory subtree. + os.close(src_dir_fd) + os.close(dst_dir_fd) + return - For all other parameters, refer to the 'ar_open' function - for an explanation of their usage and effects.""" - # all paths must be absolute - assert os.path.isabs(root) +def ar_mkdir(root, path, mode, exists_is_ok=False): + """A function similar to os.mkdir() that ensures that the path we're + opening resides within a specified directory subtree. 
- # make target into a relative path - if os.path.isabs(path): - path = __path_abs_to_relative(path) + For all other parameters, refer to the 'ar_open' function + for an explanation of their usage and effects.""" - path_dir = os.path.dirname(path) - path_file = os.path.basename(path) + # all paths must be absolute + assert os.path.isabs(root) - path_dir_fd = ar_open(root, path_dir, os.O_RDONLY) - try: - sat.mkdirat(path_dir_fd, path_file, mode) - except OSError as e: - os.close(path_dir_fd) - if exists_is_ok and e.errno == errno.EEXIST: - return - raise e + # make target into a relative path + if os.path.isabs(path): + path = __path_abs_to_relative(path) + path_dir = os.path.dirname(path) + path_file = os.path.basename(path) + + path_dir_fd = ar_open(root, path_dir, os.O_RDONLY) + try: + sat.mkdirat(path_dir_fd, path_file, mode) + except OSError as e: os.close(path_dir_fd) - return + if exists_is_ok and e.errno == errno.EEXIST: + return + raise e + + os.close(path_dir_fd) + return + def ar_stat(root, path): - """A function similar to os.stat() that ensures that the path - we're accessing resides within a specified directory subtree. + """A function similar to os.stat() that ensures that the path + we're accessing resides within a specified directory subtree. - For all other parameters, refer to the 'ar_open' function - for an explanation of their usage and effects.""" + For all other parameters, refer to the 'ar_open' function + for an explanation of their usage and effects.""" + + try: + fd = ar_open(root, path, os.O_RDONLY) + except OSError as e: + raise e + si = os.fstat(fd) + os.close(fd) + return si - try: - fd = ar_open(root, path, os.O_RDONLY) - except OSError as e: - raise e - si = os.fstat(fd) - os.close(fd) - return si def ar_isdir(root, path): - """A function similar to os.path.isdir() that ensures that the path - we're accessing resides within a specified directory subtree. + """A function similar to os.path.isdir() that ensures that the path + we're accessing resides within a specified directory subtree. - For all other parameters, refer to the 'ar_open' function - for an explanation of their usage and effects.""" + For all other parameters, refer to the 'ar_open' function + for an explanation of their usage and effects.""" - try: - si = ar_stat(root, path) - except OSError as e: - if e.errno == errno.ENOENT: - return False - raise e + try: + si = ar_stat(root, path) + except OSError as e: + if e.errno == errno.ENOENT: + return False + raise e + + if stat.S_ISDIR(si.st_mode): + return True + return False - if stat.S_ISDIR(si.st_mode): - return True - return False def ar_exists(root, path): - """A function similar to os.path.exists() that ensures that the path - we're accessing resides within a specified directory subtree. + """A function similar to os.path.exists() that ensures that the path + we're accessing resides within a specified directory subtree. 
- For all other parameters, refer to the 'ar_open' function - for an explanation of their usage and effects.""" + For all other parameters, refer to the 'ar_open' function + for an explanation of their usage and effects.""" + + try: + fd = ar_open(root, path, os.O_RDONLY) + except OSError as e: + if e.errno == errno.ENOENT: + return False + raise e + os.close(fd) + return True - try: - fd = ar_open(root, path, os.O_RDONLY) - except OSError as e: - if e.errno == errno.ENOENT: - return False - raise e - os.close(fd) - return True def ar_diff(root, path1, path2): - """A function similar to filecmp.cmp() that ensures that the path - we're accessing resides within a specified directory subtree. + """A function similar to filecmp.cmp() that ensures that the path + we're accessing resides within a specified directory subtree. - For all other parameters, refer to the 'ar_open' function - for an explanation of their usage and effects.""" + For all other parameters, refer to the 'ar_open' function + for an explanation of their usage and effects.""" - fd1 = fd2 = None + fd1 = fd2 = None - diff = False - try: - fd1 = ar_open(root, path1, os.O_RDONLY) - fd2 = ar_open(root, path2, os.O_RDONLY) - - while True: - b1 = os.read(fd1, 1024) - b2 = os.read(fd2, 1024) - if len(b1) == 0 and len(b2) == 0: - # we're done - break - if len(b1) != len(b2) or b1 != b2: - diff = True - break - except OSError as e: - if fd1: - os.close(fd1) - if fd2: - os.close(fd2) - raise e + diff = False + try: + fd1 = ar_open(root, path1, os.O_RDONLY) + fd2 = ar_open(root, path2, os.O_RDONLY) + + while True: + b1 = os.read(fd1, 1024) + b2 = os.read(fd2, 1024) + if len(b1) == 0 and len(b2) == 0: + # we're done + break + if len(b1) != len(b2) or b1 != b2: + diff = True + break + except OSError as e: + if fd1: + os.close(fd1) + if fd2: + os.close(fd2) + raise e + + os.close(fd1) + os.close(fd2) + return diff - os.close(fd1) - os.close(fd2) - return diff def ar_img_prefix(root): - """A function that attempts to determine if a user or root pkg(7) - managed image can be found at 'root'. If 'root' does point to a - pkg(7) image, then we return the relative path to the image metadata - directory.""" - - import pkg.client.image as image - - user_img = False - root_img = False - - if ar_isdir(root, image.img_user_prefix): - user_img = True - - if ar_isdir(root, image.img_root_prefix): - root_img = True - - if user_img and root_img: - # - # why would an image have two pkg metadata directories. - # is this image corrupt? - # - return None - if user_img: - return image.img_user_prefix - if root_img: - return image.img_root_prefix + """A function that attempts to determine if a user or root pkg(7) + managed image can be found at 'root'. If 'root' does point to a + pkg(7) image, then we return the relative path to the image metadata + directory.""" + + import pkg.client.image as image + + user_img = False + root_img = False + + if ar_isdir(root, image.img_user_prefix): + user_img = True + + if ar_isdir(root, image.img_root_prefix): + root_img = True + + if user_img and root_img: + # + # why would an image have two pkg metadata directories. + # is this image corrupt? 
+ # return None + if user_img: + return image.img_user_prefix + if root_img: + return image.img_root_prefix + return None + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/api_common.py b/src/modules/api_common.py index b649115ba..2dfd07bab 100644 --- a/src/modules/api_common.py +++ b/src/modules/api_common.py @@ -36,228 +36,267 @@ import pkg.fmri as fmri import pkg.misc as misc + class LicenseInfo(object): - """A class representing the license information a package - provides. Not intended for instantiation by API consumers.""" + """A class representing the license information a package + provides. Not intended for instantiation by API consumers.""" - def __init__(self, pfmri, act, img=None, text=None, alt_pub=None): - self.__action = act - self.__alt_pub = alt_pub - self.__fmri = pfmri - self.__img = img - self.__text = text + def __init__(self, pfmri, act, img=None, text=None, alt_pub=None): + self.__action = act + self.__alt_pub = alt_pub + self.__fmri = pfmri + self.__img = img + self.__text = text - def __str__(self): - return self.get_text() + def __str__(self): + return self.get_text() - def get_text(self): - """Retrieves and returns the payload of the license (which - should be text). This may require remote retrieval of - resources and so this could raise a TransportError or other - ApiException.""" + def get_text(self): + """Retrieves and returns the payload of the license (which + should be text). This may require remote retrieval of + resources and so this could raise a TransportError or other + ApiException.""" - if not self.__img: - return self.__text - return self.__action.get_text(self.__img, self.__fmri, - alt_pub=self.__alt_pub) + if not self.__img: + return self.__text + return self.__action.get_text( + self.__img, self.__fmri, alt_pub=self.__alt_pub + ) - @property - def fmri(self): - """The FMRI of the package this license is for.""" + @property + def fmri(self): + """The FMRI of the package this license is for.""" - return self.__fmri + return self.__fmri - @property - def license(self): - """The keyword identifying this license within its related - package.""" + @property + def license(self): + """The keyword identifying this license within its related + package.""" - return self.__action.attrs["license"] + return self.__action.attrs["license"] - @property - def must_accept(self): - """A boolean value indicating whether the license requires - acceptance.""" + @property + def must_accept(self): + """A boolean value indicating whether the license requires + acceptance.""" - return self.__action.must_accept + return self.__action.must_accept - @property - def must_display(self): - """A boolean value indicating whether the license must be - displayed during install or update operations.""" + @property + def must_display(self): + """A boolean value indicating whether the license must be + displayed during install or update operations.""" - return self.__action.must_display + return self.__action.must_display class PackageCategory(object): - """Represents the scheme and category of an info.classification entry - for a package.""" + """Represents the scheme and category of an info.classification entry + for a package.""" - scheme = None - category = None + scheme = None + category = None - def __init__(self, scheme, category): - self.scheme = scheme - self.category = category + def __init__(self, scheme, category): + self.scheme = scheme + self.category = category - def __str__(self, verbose=False): - if verbose: - return 
"{0} ({1})".format(self.category, self.scheme) - else: - return "{0}".format(self.category) + def __str__(self, verbose=False): + if verbose: + return "{0} ({1})".format(self.category, self.scheme) + else: + return "{0}".format(self.category) class PackageInfo(object): - """A class capturing the information about packages that a client - could need. The fmri is guaranteed to be set. All other values may - be None, depending on how the PackageInfo instance was created.""" - - # Possible package states; these constants should match the values used - # by the Image class. Constants with negative values are not currently - # available. - INCORPORATED = -2 - EXCLUDES = -3 - KNOWN = pkgdefs.PKG_STATE_KNOWN - INSTALLED = pkgdefs.PKG_STATE_INSTALLED - UPGRADABLE = pkgdefs.PKG_STATE_UPGRADABLE - OBSOLETE = pkgdefs.PKG_STATE_OBSOLETE - RENAMED = pkgdefs.PKG_STATE_RENAMED - LEGACY = pkgdefs.PKG_STATE_LEGACY - UNSUPPORTED = pkgdefs.PKG_STATE_UNSUPPORTED - FROZEN = pkgdefs.PKG_STATE_FROZEN - OPTIONAL = pkgdefs.PKG_STATE_OPTIONAL - MANUAL = pkgdefs.PKG_STATE_MANUAL - - __NUM_PROPS = 13 - IDENTITY, SUMMARY, CATEGORIES, STATE, SIZE, LICENSES, LINKS, \ - HARDLINKS, FILES, DIRS, DEPENDENCIES, DESCRIPTION, \ - ALL_ATTRIBUTES = range(__NUM_PROPS) - ALL_OPTIONS = frozenset(range(__NUM_PROPS)) - ACTION_OPTIONS = frozenset([LINKS, HARDLINKS, FILES, DIRS, - DEPENDENCIES]) - - def __init__(self, pfmri, pkg_stem=None, summary=None, - category_info_list=None, states=None, publisher=None, - version=None, build_release=None, branch=None, packaging_date=None, - size=None, csize=None, licenses=None, links=None, hardlinks=None, - files=None, dirs=None, dependencies=None, description=None, - attrs=None, last_update=None, last_install=None): - self.pkg_stem = pkg_stem - - self.summary = summary - if category_info_list is None: - category_info_list = [] - self.category_info_list = category_info_list - self.states = states - self.publisher = publisher - self.version = version - self.build_release = build_release - self.branch = branch - self.packaging_date = packaging_date - self.size = size - self.csize = csize - self.fmri = pfmri - self.licenses = licenses - self.links = links - self.hardlinks = hardlinks - self.files = files - self.dirs = dirs - self.dependencies = dependencies - self.description = description - self.attrs = attrs or {} - self.last_update = last_update - self.last_install = last_install - - def __str__(self): - return str(self.fmri) - - @staticmethod - def build_from_fmri(f): - if not f: - return f - pub, name, version = f.tuple() - pub = fmri.strip_pub_pfx(pub) - return PackageInfo(pkg_stem=name, publisher=pub, - version=version.release, - build_release=version.build_release, branch=version.branch, - packaging_date=version.get_timestamp().strftime("%c"), - pfmri=f) - - def get_attr_values(self, name, modifiers=()): - """Returns a list of the values of the package attribute 'name'. - - The 'modifiers' parameter, if present, is a dict containing - key/value pairs, all of which must be present on an action in - order for the values to be returned. - - Returns an empty list if there are no values. - """ - - # XXX should the modifiers parameter be allowed to be a subset - # of an action's modifiers? 
- if isinstance(modifiers, dict): - modifiers = tuple( - (k, isinstance(modifiers[k], six.string_types) and - tuple([sorted(modifiers[k])]) or - tuple(sorted(modifiers[k]))) - for k in sorted(six.iterkeys(modifiers)) - ) - return self.attrs.get(name, {modifiers: []}).get( - modifiers, []) - - -def _get_pkg_cat_data(cat, info_needed, actions=None, - excludes=misc.EmptyI, pfmri=None): - """This is a private method and not intended for - external consumers.""" - - # XXX this doesn't handle locale. - get_summ = summ = desc = cat_info = deps = None - cat_data = [] - get_summ = PackageInfo.SUMMARY in info_needed - if PackageInfo.CATEGORIES in info_needed: - cat_info = [] - if PackageInfo.DEPENDENCIES in info_needed: - cat_data.append(cat.DEPENDENCY) - deps = [] - - if deps is None or len(info_needed) != 1: - # Anything other than dependency data - # requires summary data. - cat_data.append(cat.SUMMARY) - - if actions is None: - actions = cat.get_entry_actions(pfmri, cat_data, - excludes=excludes) - - for a in actions: - if deps is not None and a.name == "depend": - deps.append(a.attrs.get(a.key_attr)) - continue - elif a.name != "set": - continue - - attr_name = a.attrs["name"] - if attr_name == "pkg.summary": - if get_summ: - summ = a.attrs["value"] - elif attr_name == "description": - if get_summ and summ is None: - # Historical summary field. - summ = a.attrs["value"] - elif attr_name == "pkg.description": - desc = a.attrs["value"] - elif cat_info != None and a.has_category_info(): - cat_info.extend(a.parse_category_info()) - - if get_summ and summ is None: - if desc is None: - summ = "" - else: - summ = desc - if not PackageInfo.DESCRIPTION in info_needed: - desc = None - return summ, desc, cat_info, deps + """A class capturing the information about packages that a client + could need. The fmri is guaranteed to be set. All other values may + be None, depending on how the PackageInfo instance was created.""" + + # Possible package states; these constants should match the values used + # by the Image class. Constants with negative values are not currently + # available. 
+ INCORPORATED = -2 + EXCLUDES = -3 + KNOWN = pkgdefs.PKG_STATE_KNOWN + INSTALLED = pkgdefs.PKG_STATE_INSTALLED + UPGRADABLE = pkgdefs.PKG_STATE_UPGRADABLE + OBSOLETE = pkgdefs.PKG_STATE_OBSOLETE + RENAMED = pkgdefs.PKG_STATE_RENAMED + LEGACY = pkgdefs.PKG_STATE_LEGACY + UNSUPPORTED = pkgdefs.PKG_STATE_UNSUPPORTED + FROZEN = pkgdefs.PKG_STATE_FROZEN + OPTIONAL = pkgdefs.PKG_STATE_OPTIONAL + MANUAL = pkgdefs.PKG_STATE_MANUAL + + __NUM_PROPS = 13 + ( + IDENTITY, + SUMMARY, + CATEGORIES, + STATE, + SIZE, + LICENSES, + LINKS, + HARDLINKS, + FILES, + DIRS, + DEPENDENCIES, + DESCRIPTION, + ALL_ATTRIBUTES, + ) = range(__NUM_PROPS) + ALL_OPTIONS = frozenset(range(__NUM_PROPS)) + ACTION_OPTIONS = frozenset([LINKS, HARDLINKS, FILES, DIRS, DEPENDENCIES]) + + def __init__( + self, + pfmri, + pkg_stem=None, + summary=None, + category_info_list=None, + states=None, + publisher=None, + version=None, + build_release=None, + branch=None, + packaging_date=None, + size=None, + csize=None, + licenses=None, + links=None, + hardlinks=None, + files=None, + dirs=None, + dependencies=None, + description=None, + attrs=None, + last_update=None, + last_install=None, + ): + self.pkg_stem = pkg_stem + + self.summary = summary + if category_info_list is None: + category_info_list = [] + self.category_info_list = category_info_list + self.states = states + self.publisher = publisher + self.version = version + self.build_release = build_release + self.branch = branch + self.packaging_date = packaging_date + self.size = size + self.csize = csize + self.fmri = pfmri + self.licenses = licenses + self.links = links + self.hardlinks = hardlinks + self.files = files + self.dirs = dirs + self.dependencies = dependencies + self.description = description + self.attrs = attrs or {} + self.last_update = last_update + self.last_install = last_install + + def __str__(self): + return str(self.fmri) + + @staticmethod + def build_from_fmri(f): + if not f: + return f + pub, name, version = f.tuple() + pub = fmri.strip_pub_pfx(pub) + return PackageInfo( + pkg_stem=name, + publisher=pub, + version=version.release, + build_release=version.build_release, + branch=version.branch, + packaging_date=version.get_timestamp().strftime("%c"), + pfmri=f, + ) + + def get_attr_values(self, name, modifiers=()): + """Returns a list of the values of the package attribute 'name'. + + The 'modifiers' parameter, if present, is a dict containing + key/value pairs, all of which must be present on an action in + order for the values to be returned. + + Returns an empty list if there are no values. + """ + + # XXX should the modifiers parameter be allowed to be a subset + # of an action's modifiers? + if isinstance(modifiers, dict): + modifiers = tuple( + ( + k, + isinstance(modifiers[k], six.string_types) + and tuple([sorted(modifiers[k])]) + or tuple(sorted(modifiers[k])), + ) + for k in sorted(six.iterkeys(modifiers)) + ) + return self.attrs.get(name, {modifiers: []}).get(modifiers, []) + + +def _get_pkg_cat_data( + cat, info_needed, actions=None, excludes=misc.EmptyI, pfmri=None +): + """This is a private method and not intended for + external consumers.""" + + # XXX this doesn't handle locale. 
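
# A minimal sketch (editor's illustration, not part of the change being made
# here): interpreting the state constants defined on PackageInfo above,
# assuming `info.states` carries those constants as the class docstring
# describes.
from pkg.api_common import PackageInfo


def describe_states(info):
    """Map a PackageInfo's states to human-readable labels."""
    labels = {
        PackageInfo.INSTALLED: "installed",
        PackageInfo.FROZEN: "frozen",
        PackageInfo.OBSOLETE: "obsolete",
        PackageInfo.RENAMED: "renamed",
        PackageInfo.MANUAL: "manually installed",
    }
    return [labels[s] for s in (info.states or []) if s in labels]
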
+ get_summ = summ = desc = cat_info = deps = None + cat_data = [] + get_summ = PackageInfo.SUMMARY in info_needed + if PackageInfo.CATEGORIES in info_needed: + cat_info = [] + if PackageInfo.DEPENDENCIES in info_needed: + cat_data.append(cat.DEPENDENCY) + deps = [] + + if deps is None or len(info_needed) != 1: + # Anything other than dependency data + # requires summary data. + cat_data.append(cat.SUMMARY) + + if actions is None: + actions = cat.get_entry_actions(pfmri, cat_data, excludes=excludes) + + for a in actions: + if deps is not None and a.name == "depend": + deps.append(a.attrs.get(a.key_attr)) + continue + elif a.name != "set": + continue + + attr_name = a.attrs["name"] + if attr_name == "pkg.summary": + if get_summ: + summ = a.attrs["value"] + elif attr_name == "description": + if get_summ and summ is None: + # Historical summary field. + summ = a.attrs["value"] + elif attr_name == "pkg.description": + desc = a.attrs["value"] + elif cat_info != None and a.has_category_info(): + cat_info.extend(a.parse_category_info()) + + if get_summ and summ is None: + if desc is None: + summ = "" + else: + summ = desc + if not PackageInfo.DESCRIPTION in info_needed: + desc = None + return summ, desc, cat_info, deps + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/arch.py b/src/modules/arch.py index 85fd2075d..f95106cee 100644 --- a/src/modules/arch.py +++ b/src/modules/arch.py @@ -72,6 +72,7 @@ def get_isainfo(): buf = buf1 from pkg.misc import force_text + # ffi.string returns a bytes if buf == NULL: buf1 = force_text(ffi.string(ffi.cast("char *", buf1))) @@ -90,6 +91,7 @@ def get_release(): if buf == NULL: return from pkg.misc import force_text + return force_text(ffi.string(ffi.cast("char *", buf))) @@ -99,7 +101,9 @@ def get_platform(): if buf == NULL: return from pkg.misc import force_text + return force_text(ffi.string(ffi.cast("char *", buf))) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/bundle/DirectoryBundle.py b/src/modules/bundle/DirectoryBundle.py index 907b892fb..c5aaa84d4 100644 --- a/src/modules/bundle/DirectoryBundle.py +++ b/src/modules/bundle/DirectoryBundle.py @@ -35,113 +35,129 @@ import pkg.actions.link import pkg.actions.hardlink + class DirectoryBundle(pkg.bundle.Bundle): - """The DirectoryBundle class assists in the conversion of a directory - tree to a pkg(7) package by traversing the tree and emitting actions for - all files, directories, and links found therein. - - Paths are published relative to the given directory. Hardlinks are - resolved as long as their companions are in the tree as well. - - All owners are set to "root" and groups to "bin", as the ownership - information is not considered to be valid. These can be set by the - caller once the action has been emitted. - """ - - def __init__(self, path, targetpaths=(), use_default_owner=True): - # XXX This could be more intelligent. Or get user input. Or - # extend API to take FMRI. 
- path = os.path.normpath(path) - self.filename = path - self.rootdir = path - self.pkgname = os.path.basename(self.rootdir) - self.inodes = None - self.targetpaths = targetpaths - self.pkg = None - self.use_default_owner = use_default_owner - - def _walk_bundle(self): - # Pre-populate self.inodes with the paths of known targets - if self.inodes is None: - self.inodes = {} - for p in self.targetpaths: - fp = os.path.join(self.rootdir, p) - pstat = os.lstat(fp) - self.inodes[pstat.st_ino] = fp - - for root, dirs, files in os.walk(self.rootdir): - for obj in dirs + files: - path = os.path.join(root, obj) - yield path, (path,) - - def __iter__(self): - for path, data in self._walk_bundle(): - act = self.action(*data) - if act: - yield act - - def action(self, path): - rootdir = self.rootdir - pubpath = pkg.misc.relpath(path, rootdir) - pstat = os.lstat(path) - mode = oct(stat.S_IMODE(pstat.st_mode)) - timestamp = pkg.misc.time_to_timestamp(pstat.st_mtime) - - # Set default root and group. - owner = "root" - group = "bin" - - # Check whether need to change owner. - if not self.use_default_owner: - try: - owner = pwd.getpwuid(pstat.st_uid).pw_name - except KeyError as e: - owner = None - try: - group = grp.getgrgid(pstat.st_gid).gr_name - except KeyError as e: - group = None - - if not owner and not group: - raise pkg.bundle.InvalidOwnershipException( - path, uid=pstat.st_uid, gid=pstat.st_gid) - elif not owner: - raise pkg.bundle.InvalidOwnershipException( - path, uid=pstat.st_uid) - elif not group: - raise pkg.bundle.InvalidOwnershipException( - path, gid=pstat.st_gid) - - if stat.S_ISREG(pstat.st_mode): - inode = pstat.st_ino - # Any inode in self.inodes will either have been visited - # before or will have been pre-populated from the list - # of known targets. Create file actions for known - # targets and unvisited inodes. - if pubpath in self.targetpaths or \ - inode not in self.inodes: - if pstat.st_nlink > 1: - self.inodes.setdefault(inode, path) - return pkg.actions.file.FileAction( - open(path, "rb"), mode=mode, owner=owner, - group=group, path=pubpath, - timestamp=timestamp) - else: - # Find the relative path to the link target. - target = pkg.misc.relpath(self.inodes[inode], - os.path.dirname(path)) - return pkg.actions.hardlink.HardLinkAction( - path=pubpath, target=target) - elif stat.S_ISLNK(pstat.st_mode): - return pkg.actions.link.LinkAction( - target=os.readlink(path), path=pubpath) - elif stat.S_ISDIR(pstat.st_mode): - return pkg.actions.directory.DirectoryAction( - timestamp=timestamp, mode=mode, owner=owner, - group=group, path=pubpath) + """The DirectoryBundle class assists in the conversion of a directory + tree to a pkg(7) package by traversing the tree and emitting actions for + all files, directories, and links found therein. + + Paths are published relative to the given directory. Hardlinks are + resolved as long as their companions are in the tree as well. + + All owners are set to "root" and groups to "bin", as the ownership + information is not considered to be valid. These can be set by the + caller once the action has been emitted. + """ + + def __init__(self, path, targetpaths=(), use_default_owner=True): + # XXX This could be more intelligent. Or get user input. Or + # extend API to take FMRI. 
+ path = os.path.normpath(path) + self.filename = path + self.rootdir = path + self.pkgname = os.path.basename(self.rootdir) + self.inodes = None + self.targetpaths = targetpaths + self.pkg = None + self.use_default_owner = use_default_owner + + def _walk_bundle(self): + # Pre-populate self.inodes with the paths of known targets + if self.inodes is None: + self.inodes = {} + for p in self.targetpaths: + fp = os.path.join(self.rootdir, p) + pstat = os.lstat(fp) + self.inodes[pstat.st_ino] = fp + + for root, dirs, files in os.walk(self.rootdir): + for obj in dirs + files: + path = os.path.join(root, obj) + yield path, (path,) + + def __iter__(self): + for path, data in self._walk_bundle(): + act = self.action(*data) + if act: + yield act + + def action(self, path): + rootdir = self.rootdir + pubpath = pkg.misc.relpath(path, rootdir) + pstat = os.lstat(path) + mode = oct(stat.S_IMODE(pstat.st_mode)) + timestamp = pkg.misc.time_to_timestamp(pstat.st_mtime) + + # Set default root and group. + owner = "root" + group = "bin" + + # Check whether need to change owner. + if not self.use_default_owner: + try: + owner = pwd.getpwuid(pstat.st_uid).pw_name + except KeyError as e: + owner = None + try: + group = grp.getgrgid(pstat.st_gid).gr_name + except KeyError as e: + group = None + + if not owner and not group: + raise pkg.bundle.InvalidOwnershipException( + path, uid=pstat.st_uid, gid=pstat.st_gid + ) + elif not owner: + raise pkg.bundle.InvalidOwnershipException( + path, uid=pstat.st_uid + ) + elif not group: + raise pkg.bundle.InvalidOwnershipException( + path, gid=pstat.st_gid + ) + + if stat.S_ISREG(pstat.st_mode): + inode = pstat.st_ino + # Any inode in self.inodes will either have been visited + # before or will have been pre-populated from the list + # of known targets. Create file actions for known + # targets and unvisited inodes. + if pubpath in self.targetpaths or inode not in self.inodes: + if pstat.st_nlink > 1: + self.inodes.setdefault(inode, path) + return pkg.actions.file.FileAction( + open(path, "rb"), + mode=mode, + owner=owner, + group=group, + path=pubpath, + timestamp=timestamp, + ) + else: + # Find the relative path to the link target. 
+ target = pkg.misc.relpath( + self.inodes[inode], os.path.dirname(path) + ) + return pkg.actions.hardlink.HardLinkAction( + path=pubpath, target=target + ) + elif stat.S_ISLNK(pstat.st_mode): + return pkg.actions.link.LinkAction( + target=os.readlink(path), path=pubpath + ) + elif stat.S_ISDIR(pstat.st_mode): + return pkg.actions.directory.DirectoryAction( + timestamp=timestamp, + mode=mode, + owner=owner, + group=group, + path=pubpath, + ) + def test(filename): - return stat.S_ISDIR(os.stat(filename).st_mode) + return stat.S_ISDIR(os.stat(filename).st_mode) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/bundle/SolarisPackageDatastreamBundle.py b/src/modules/bundle/SolarisPackageDatastreamBundle.py index 6ab053ae6..52ccf49a0 100644 --- a/src/modules/bundle/SolarisPackageDatastreamBundle.py +++ b/src/modules/bundle/SolarisPackageDatastreamBundle.py @@ -34,180 +34,198 @@ from pkg.bundle import InvalidBundleException typemap = { - stat.S_IFBLK: "block-special", - stat.S_IFCHR: "character-special", - stat.S_IFDIR: "directory", - stat.S_IFIFO: "fifo", - stat.S_IFLNK: "link", - stat.S_IFREG: "file", - stat.S_IFSOCK: "socket" + stat.S_IFBLK: "block-special", + stat.S_IFCHR: "character-special", + stat.S_IFDIR: "directory", + stat.S_IFIFO: "fifo", + stat.S_IFLNK: "link", + stat.S_IFREG: "file", + stat.S_IFSOCK: "socket", } + class SolarisPackageDatastreamBundle(SolarisPackageDirBundle): - """XXX Need a class comment.""" - - def __init__(self, filename, **kwargs): - filename = os.path.normpath(filename) - self.pkg = SolarisPackage(filename) - self.pkgname = self.pkg.pkginfo["PKG"] - self.filename = filename - - # map the path name to the SVR4 class it belongs to and - # maintain a set of pre/post install/remove and class action - # scripts this package uses. - self.class_actions_dir = {} - self.class_action_names = set() - self.scripts = set() - - self.hollow = self.pkg.pkginfo.get("SUNW_PKG_HOLLOW", - "").lower() == "true" - self.pkginfo_actions = self.get_pkginfo_actions( - self.pkg.pkginfo) - - # SolarisPackage.manifest is a list. Cache it into a dictionary - # based on pathname. The cpio archive contains the files as - # they would be in the directory structure -- that is, under - # install, reloc, or root, depending on whether they're i-type - # files, relocatable files, or unrelocatable files. Make sure - # we find the right object, even though the filenames in the - # package map don't have these directory names. - self.pkgmap = {} - - for p in self.pkg.manifest: - if p.type in "fevdsl": - if p.pathname[0] == "/": - d = "root" - else: - d = "reloc/" - self.pkgmap[d + p.pathname] = p - self.class_actions_dir[p.pathname] = p.klass - self.class_action_names.add(p.klass) - elif p.type == "i": - self.pkgmap["install/" + p.pathname] = p - - def _walk_bundle(self): - for act in self.pkginfo_actions: - yield act.attrs.get("path"), act - - for p in self.pkg.datastream: - yield p.name, (self.pkgmap, p, p.name) - - # for some reason, some packages may have directories specified - # in the pkgmap that don't exist in the archive. They need to - # be found and iterated as well. - # - # Some of the blastwave packages also have directories in the - # archive that don't exist in the package metadata. I don't see - # a whole lot of point in faking those up. 
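
# A tiny sketch (editor's illustration, not part of the change being made
# here): the typemap above keys pkg(7) action type names by stat format bits,
# so a caller can classify an arbitrary path with S_IFMT().  The path is
# hypothetical.
import os
import stat

from pkg.bundle.SolarisPackageDatastreamBundle import typemap

mode = os.lstat("/tmp/some-path").st_mode
print(typemap.get(stat.S_IFMT(mode), "unknown"))
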
- for p in self.pkg.manifest: - if p.type not in "lsd": - continue - - if p.pathname[0] == "/": - d = "root" - else: - d = "reloc/" - path = d + p.pathname - if (p.type == "d" and path not in self.pkg.datastream) or \ - p.type in "ls": - yield path, (self.pkgmap, None, path) - - def __iter__(self): - """Iterate through the datastream. - - This is different than the directory-format package bundle, - which iterates through the package map. We do it this way - because the cpio archive might not be in the same order as - the package map, and we want never to seek backwards. This - implies that we're going to have to look up the meta info for - each file from the package map. We could get the file type - from the archive, but it's probably safe to assume that the - file type in the archive is the same as the file type in the - package map. - """ - for path, data in self._walk_bundle(): - if type(data) != tuple: - yield data - continue - - act = self.action(*data) - if act: - yield act - - def action(self, pkgmap, ci, path): - try: - mapline = pkgmap[path] - except KeyError: - # XXX Return an unknown instead of a missing, for now. - return unknown.UnknownAction(path=path) - - act = None - - # If any one of the mode, owner, or group is "?", then we're - # clearly not capable of delivering the object correctly, so - # ignore it. - if mapline.type in "fevdx" and (mapline.mode == "?" or - mapline.owner == "?" or mapline.group == "?"): - return None - - if mapline.type in "fev": - # false positive - # file-builtin; pylint: disable=W1607 - act = file.FileAction(ci.extractfile(), - mode=mapline.mode, owner=mapline.owner, - group=mapline.group, path=mapline.pathname, - timestamp=misc.time_to_timestamp(int(mapline.modtime))) - elif mapline.type in "dx": - act = directory.DirectoryAction(mode = mapline.mode, - owner=mapline.owner, group=mapline.group, - path=mapline.pathname) - elif mapline.type == "s": - act = link.LinkAction(path=mapline.pathname, - target=mapline.target) - elif mapline.type == "l": - act = hardlink.HardLinkAction(path=mapline.pathname, - target=mapline.target) - elif mapline.type == "i" and mapline.pathname == "copyright": - act = license.LicenseAction(ci.extractfile(), - license="{0}.copyright".format(self.pkgname)) - act.hash = "install/copyright" - elif mapline.type == "i": - if mapline.pathname not in ["depend", "pkginfo"]: - # check to see if we've seen this script - # before - script = mapline.pathname - if script.startswith("i.") and \ - script.replace("i.", "", 1) in \ - self.class_action_names: - pass - elif script.startswith("r.") and \ - script.replace("r.", "", 1) in \ - self.class_action_names: - pass - else: - self.scripts.add(script) - return None + """XXX Need a class comment.""" + + def __init__(self, filename, **kwargs): + filename = os.path.normpath(filename) + self.pkg = SolarisPackage(filename) + self.pkgname = self.pkg.pkginfo["PKG"] + self.filename = filename + + # map the path name to the SVR4 class it belongs to and + # maintain a set of pre/post install/remove and class action + # scripts this package uses. + self.class_actions_dir = {} + self.class_action_names = set() + self.scripts = set() + + self.hollow = ( + self.pkg.pkginfo.get("SUNW_PKG_HOLLOW", "").lower() == "true" + ) + self.pkginfo_actions = self.get_pkginfo_actions(self.pkg.pkginfo) + + # SolarisPackage.manifest is a list. Cache it into a dictionary + # based on pathname. 
The cpio archive contains the files as + # they would be in the directory structure -- that is, under + # install, reloc, or root, depending on whether they're i-type + # files, relocatable files, or unrelocatable files. Make sure + # we find the right object, even though the filenames in the + # package map don't have these directory names. + self.pkgmap = {} + + for p in self.pkg.manifest: + if p.type in "fevdsl": + if p.pathname[0] == "/": + d = "root" + else: + d = "reloc/" + self.pkgmap[d + p.pathname] = p + self.class_actions_dir[p.pathname] = p.klass + self.class_action_names.add(p.klass) + elif p.type == "i": + self.pkgmap["install/" + p.pathname] = p + + def _walk_bundle(self): + for act in self.pkginfo_actions: + yield act.attrs.get("path"), act + + for p in self.pkg.datastream: + yield p.name, (self.pkgmap, p, p.name) + + # for some reason, some packages may have directories specified + # in the pkgmap that don't exist in the archive. They need to + # be found and iterated as well. + # + # Some of the blastwave packages also have directories in the + # archive that don't exist in the package metadata. I don't see + # a whole lot of point in faking those up. + for p in self.pkg.manifest: + if p.type not in "lsd": + continue + + if p.pathname[0] == "/": + d = "root" + else: + d = "reloc/" + path = d + p.pathname + if ( + p.type == "d" and path not in self.pkg.datastream + ) or p.type in "ls": + yield path, (self.pkgmap, None, path) + + def __iter__(self): + """Iterate through the datastream. + + This is different than the directory-format package bundle, + which iterates through the package map. We do it this way + because the cpio archive might not be in the same order as + the package map, and we want never to seek backwards. This + implies that we're going to have to look up the meta info for + each file from the package map. We could get the file type + from the archive, but it's probably safe to assume that the + file type in the archive is the same as the file type in the + package map. + """ + for path, data in self._walk_bundle(): + if type(data) != tuple: + yield data + continue + + act = self.action(*data) + if act: + yield act + + def action(self, pkgmap, ci, path): + try: + mapline = pkgmap[path] + except KeyError: + # XXX Return an unknown instead of a missing, for now. + return unknown.UnknownAction(path=path) + + act = None + + # If any one of the mode, owner, or group is "?", then we're + # clearly not capable of delivering the object correctly, so + # ignore it. + if mapline.type in "fevdx" and ( + mapline.mode == "?" or mapline.owner == "?" or mapline.group == "?" 
+ ): + return None + + if mapline.type in "fev": + # false positive + # file-builtin; pylint: disable=W1607 + act = file.FileAction( + ci.extractfile(), + mode=mapline.mode, + owner=mapline.owner, + group=mapline.group, + path=mapline.pathname, + timestamp=misc.time_to_timestamp(int(mapline.modtime)), + ) + elif mapline.type in "dx": + act = directory.DirectoryAction( + mode=mapline.mode, + owner=mapline.owner, + group=mapline.group, + path=mapline.pathname, + ) + elif mapline.type == "s": + act = link.LinkAction(path=mapline.pathname, target=mapline.target) + elif mapline.type == "l": + act = hardlink.HardLinkAction( + path=mapline.pathname, target=mapline.target + ) + elif mapline.type == "i" and mapline.pathname == "copyright": + act = license.LicenseAction( + ci.extractfile(), license="{0}.copyright".format(self.pkgname) + ) + act.hash = "install/copyright" + elif mapline.type == "i": + if mapline.pathname not in ["depend", "pkginfo"]: + # check to see if we've seen this script + # before + script = mapline.pathname + if ( + script.startswith("i.") + and script.replace("i.", "", 1) in self.class_action_names + ): + pass + elif ( + script.startswith("r.") + and script.replace("r.", "", 1) in self.class_action_names + ): + pass else: - act = unknown.UnknownAction(path=mapline.pathname) + self.scripts.add(script) + return None + else: + act = unknown.UnknownAction(path=mapline.pathname) + + if self.hollow and act: + act.attrs[self.hollow_attr] = "true" + return act - if self.hollow and act: - act.attrs[self.hollow_attr] = "true" - return act def test(filename): - if not os.path.isfile(filename): - return False + if not os.path.isfile(filename): + return False + + try: + SolarisPackage(filename) + return True + except MultiPackageDatastreamException: + raise InvalidBundleException( + _( + "Multi-package datastreams are not supported.\n" + "Please use pkgtrans(1) to convert this bundle to " + "multiple\nfilesystem format packages." + ) + ) + except: + return False - try: - SolarisPackage(filename) - return True - except MultiPackageDatastreamException: - raise InvalidBundleException( - _("Multi-package datastreams are not supported.\n" - "Please use pkgtrans(1) to convert this bundle to " - "multiple\nfilesystem format packages.")) - except: - return False # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/bundle/SolarisPackageDirBundle.py b/src/modules/bundle/SolarisPackageDirBundle.py index a0a62ef7f..365865c26 100644 --- a/src/modules/bundle/SolarisPackageDirBundle.py +++ b/src/modules/bundle/SolarisPackageDirBundle.py @@ -38,273 +38,294 @@ class SolarisPackageDirBundle(pkg.bundle.Bundle): - - hollow_attr = "pkg.send.convert.sunw-pkg-hollow" - - def __init__(self, filename, data=True, **kwargs): - filename = os.path.normpath(filename) - self.pkg = SolarisPackage(filename) - self.pkgname = self.pkg.pkginfo["PKG"] - self.filename = filename - self.data = data - - # map the path name to the SVR4 class it belongs to and - # maintain a set of pre/post install/remove and class action - # scripts this package uses. 
- self.class_actions_dir = {} - self.class_action_names = set() - self.scripts = set() - - self.hollow = self.pkg.pkginfo.get("SUNW_PKG_HOLLOW", - "").lower() == "true" - # A list of pkg.action.AttributeActions with pkginfo - # attributes for items that don't map to pkg(7) equivalents - self.pkginfo_actions = self.get_pkginfo_actions(self.pkg.pkginfo) - - def _walk_bundle(self): - faspac = [] - if "faspac" in self.pkg.pkginfo: - faspac = self.pkg.pkginfo["faspac"] - - # Want to access the manifest as a dict. - pkgmap = {} - for p in self.pkg.manifest: - pkgmap[p.pathname] = p - self.class_actions_dir[p.pathname] = p.klass - self.class_action_names.add(p.klass) - - for act in self.pkginfo_actions: - yield act.attrs.get("path"), act - - if not self.data: - for p in self.pkg.manifest: - act = self.action(p, None) - if act: - yield act.attrs.get("path"), act - return - - def j(path): - return os.path.join(self.pkg.basedir, path) - - faspac_contents = set() - - for klass in faspac: - fpath = os.path.join(self.filename, "archive", klass) - # We accept either bz2 or 7zip'd files - for x in [".bz2", ".7z"]: - if os.path.exists(fpath + x): - cf = CpioFile.open(fpath + x) - break - - for ci in cf: - faspac_contents.add(j(ci.name)) - act = self.action(pkgmap[j(ci.name)], - ci.extractfile()) - if act: - yield act.attrs.get("path"), act - - # Remove BASEDIR from a relocatable path. The extra work is - # because if BASEDIR is not empty (non-"/"), then we probably - # need to strip an extra slash from the beginning of the path, - # but if BASEDIR is "" ("/" in the pkginfo file), then we don't - # need to do anything extra. - def r(path, ptype): - if ptype == "i": - return path - if path[0] == "/": - return path[1:] - p = path[len(self.pkg.basedir):] - if p[0] == "/": - p = p[1:] - return p - - for p in self.pkg.manifest: - # Just do the files that remain. Only regular file - # types end up compressed; so skip them and only them. - # Files with special characters in their names may not - # end up in the faspac archive, so we still need to emit - # the ones that aren't. - if p.type in "fev" and p.klass in faspac and \ - p.pathname in faspac_contents: - continue - - # These are the only valid file types in SysV packages - if p.type in "ifevbcdxpls": - if p.type == "i": - d = "install" - elif p.pathname[0] == "/": - d = "root" - else: - d = "reloc" - act = self.action(p, os.path.join(self.filename, - d, r(p.pathname, p.type))) - if act: - if act.name == "license": - # This relies on the fact that - # license actions have their - # hash set to the package path. - yield act.hash, act - else: - yield os.path.join(d, act.attrs.get( - "path", "")), act - - def __iter__(self): - for entry in self._walk_bundle(): - yield entry[-1] - - def action(self, mapline, data): - preserve_dict = { - "renameold": "renameold", - "renamenew": "renamenew", - "preserve": "true", - "svmpreserve": "true" - } - - act = None - - # If any one of the mode, owner, or group is "?", then we're - # clearly not capable of delivering the object correctly, so - # ignore it. - if mapline.type in "fevdx" and (mapline.mode == "?" or - mapline.owner == "?" or mapline.group == "?"): - return None - - if mapline.type in "fev": - # false positive - # file-builtin; pylint: disable=W1607 - act = file.FileAction(data, mode=mapline.mode, - owner=mapline.owner, group=mapline.group, - path=mapline.pathname, - timestamp=misc.time_to_timestamp(int(mapline.modtime))) - - # Add a preserve attribute if klass is known to be used - # for preservation. 
For editable and volatile files, - # always do at least basic preservation. - preserve = preserve_dict.get(mapline.klass, None) - if preserve or mapline.type in "ev": - if not preserve: - preserve = "true" - act.attrs["preserve"] = preserve - - if act.hash == "NOHASH" and \ - isinstance(data, six.string_types) and \ - data.startswith(self.filename): - act.hash = data[len(self.filename) + 1:] - elif mapline.type in "dx": - act = directory.DirectoryAction(mode=mapline.mode, - owner=mapline.owner, group=mapline.group, - path=mapline.pathname) - elif mapline.type == "s": - act = link.LinkAction(path=mapline.pathname, - target=mapline.target) - elif mapline.type == "l": - act = hardlink.HardLinkAction(path=mapline.pathname, - target=mapline.target) - elif mapline.type == "i" and mapline.pathname == "copyright": - act = license.LicenseAction(data, - license="{0}.copyright".format(self.pkgname)) - if act.hash == "NOHASH" and \ - isinstance(data, six.string_types) and \ - data.startswith(self.filename): - act.hash = data[len(self.filename) + 1:] - elif mapline.type == "i": - if mapline.pathname not in ["depend", "pkginfo"]: - # check to see if we've seen this script - # before - script = mapline.pathname - if script.startswith("i.") and \ - script.replace("i.", "", 1) \ - in self.class_action_names: - pass - elif script.startswith("r.") and \ - script.replace("r.", "", 1) in \ - self.class_action_names: - pass - else: - self.scripts.add(script) - return None + hollow_attr = "pkg.send.convert.sunw-pkg-hollow" + + def __init__(self, filename, data=True, **kwargs): + filename = os.path.normpath(filename) + self.pkg = SolarisPackage(filename) + self.pkgname = self.pkg.pkginfo["PKG"] + self.filename = filename + self.data = data + + # map the path name to the SVR4 class it belongs to and + # maintain a set of pre/post install/remove and class action + # scripts this package uses. + self.class_actions_dir = {} + self.class_action_names = set() + self.scripts = set() + + self.hollow = ( + self.pkg.pkginfo.get("SUNW_PKG_HOLLOW", "").lower() == "true" + ) + # A list of pkg.action.AttributeActions with pkginfo + # attributes for items that don't map to pkg(7) equivalents + self.pkginfo_actions = self.get_pkginfo_actions(self.pkg.pkginfo) + + def _walk_bundle(self): + faspac = [] + if "faspac" in self.pkg.pkginfo: + faspac = self.pkg.pkginfo["faspac"] + + # Want to access the manifest as a dict. + pkgmap = {} + for p in self.pkg.manifest: + pkgmap[p.pathname] = p + self.class_actions_dir[p.pathname] = p.klass + self.class_action_names.add(p.klass) + + for act in self.pkginfo_actions: + yield act.attrs.get("path"), act + + if not self.data: + for p in self.pkg.manifest: + act = self.action(p, None) + if act: + yield act.attrs.get("path"), act + return + + def j(path): + return os.path.join(self.pkg.basedir, path) + + faspac_contents = set() + + for klass in faspac: + fpath = os.path.join(self.filename, "archive", klass) + # We accept either bz2 or 7zip'd files + for x in [".bz2", ".7z"]: + if os.path.exists(fpath + x): + cf = CpioFile.open(fpath + x) + break + + for ci in cf: + faspac_contents.add(j(ci.name)) + act = self.action(pkgmap[j(ci.name)], ci.extractfile()) + if act: + yield act.attrs.get("path"), act + + # Remove BASEDIR from a relocatable path. The extra work is + # because if BASEDIR is not empty (non-"/"), then we probably + # need to strip an extra slash from the beginning of the path, + # but if BASEDIR is "" ("/" in the pkginfo file), then we don't + # need to do anything extra. 
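A minimal standalone sketch of the BASEDIR stripping described above, assuming a hypothetical BASEDIR of "opt/MYpkg"; the function name and sample paths are made up, and the r() helper that follows in the patch is the real implementation.

    def strip_basedir(path, ptype, basedir="opt/MYpkg"):
        if ptype == "i":                      # install metadata is left alone
            return path
        if path.startswith("/"):              # unrelocatable: drop the leading slash only
            return path[1:]
        p = path[len(basedir):]               # relocatable: remove BASEDIR ...
        return p[1:] if p.startswith("/") else p   # ... plus any slash left behind

    assert strip_basedir("/etc/tool.conf", "f") == "etc/tool.conf"
    assert strip_basedir("opt/MYpkg/bin/tool", "f") == "bin/tool"
    assert strip_basedir("copyright", "i") == "copyright"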
+ def r(path, ptype): + if ptype == "i": + return path + if path[0] == "/": + return path[1:] + p = path[len(self.pkg.basedir) :] + if p[0] == "/": + p = p[1:] + return p + + for p in self.pkg.manifest: + # Just do the files that remain. Only regular file + # types end up compressed; so skip them and only them. + # Files with special characters in their names may not + # end up in the faspac archive, so we still need to emit + # the ones that aren't. + if ( + p.type in "fev" + and p.klass in faspac + and p.pathname in faspac_contents + ): + continue + + # These are the only valid file types in SysV packages + if p.type in "ifevbcdxpls": + if p.type == "i": + d = "install" + elif p.pathname[0] == "/": + d = "root" + else: + d = "reloc" + act = self.action( + p, os.path.join(self.filename, d, r(p.pathname, p.type)) + ) + if act: + if act.name == "license": + # This relies on the fact that + # license actions have their + # hash set to the package path. + yield act.hash, act + else: + yield os.path.join(d, act.attrs.get("path", "")), act + + def __iter__(self): + for entry in self._walk_bundle(): + yield entry[-1] + + def action(self, mapline, data): + preserve_dict = { + "renameold": "renameold", + "renamenew": "renamenew", + "preserve": "true", + "svmpreserve": "true", + } + + act = None + + # If any one of the mode, owner, or group is "?", then we're + # clearly not capable of delivering the object correctly, so + # ignore it. + if mapline.type in "fevdx" and ( + mapline.mode == "?" or mapline.owner == "?" or mapline.group == "?" + ): + return None + + if mapline.type in "fev": + # false positive + # file-builtin; pylint: disable=W1607 + act = file.FileAction( + data, + mode=mapline.mode, + owner=mapline.owner, + group=mapline.group, + path=mapline.pathname, + timestamp=misc.time_to_timestamp(int(mapline.modtime)), + ) + + # Add a preserve attribute if klass is known to be used + # for preservation. For editable and volatile files, + # always do at least basic preservation. 
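A worked example of the preservation rule stated in the comment above (the helper name and class names are invented; the code that actually applies the rule follows below): the SVR4 class drives the attribute value, and editable or volatile entries always get at least a plain "true".

    preserve_dict = {"renameold": "renameold", "renamenew": "renamenew",
                     "preserve": "true", "svmpreserve": "true"}

    def preserve_value(klass, ftype):
        preserve = preserve_dict.get(klass, None)
        if preserve or ftype in "ev":
            return preserve or "true"
        return None                           # no preserve attribute is emitted

    assert preserve_value("renameold", "f") == "renameold"
    assert preserve_value("build", "e") == "true"      # editable: at least basic
    assert preserve_value("build", "f") is None        # plain file, plain class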
+ preserve = preserve_dict.get(mapline.klass, None) + if preserve or mapline.type in "ev": + if not preserve: + preserve = "true" + act.attrs["preserve"] = preserve + + if ( + act.hash == "NOHASH" + and isinstance(data, six.string_types) + and data.startswith(self.filename) + ): + act.hash = data[len(self.filename) + 1 :] + elif mapline.type in "dx": + act = directory.DirectoryAction( + mode=mapline.mode, + owner=mapline.owner, + group=mapline.group, + path=mapline.pathname, + ) + elif mapline.type == "s": + act = link.LinkAction(path=mapline.pathname, target=mapline.target) + elif mapline.type == "l": + act = hardlink.HardLinkAction( + path=mapline.pathname, target=mapline.target + ) + elif mapline.type == "i" and mapline.pathname == "copyright": + act = license.LicenseAction( + data, license="{0}.copyright".format(self.pkgname) + ) + if ( + act.hash == "NOHASH" + and isinstance(data, six.string_types) + and data.startswith(self.filename) + ): + act.hash = data[len(self.filename) + 1 :] + elif mapline.type == "i": + if mapline.pathname not in ["depend", "pkginfo"]: + # check to see if we've seen this script + # before + script = mapline.pathname + if ( + script.startswith("i.") + and script.replace("i.", "", 1) in self.class_action_names + ): + pass + elif ( + script.startswith("r.") + and script.replace("r.", "", 1) in self.class_action_names + ): + pass else: - act = unknown.UnknownAction(path=mapline.pathname) - - if self.hollow and act: - act.attrs["pkg.send.convert.sunw-pkg-hollow"] = "true" - return act - - def get_pkginfo_actions(self, pkginfo): - """Creates a list of pkg.action.AttributeActions corresponding - to pkginfo fields that aren't directly mapped to pkg(7) - equivalents.""" - - # these keys get converted to a legacy action - legacy_keys = [ - "arch", - "category", - "name", - "desc", - "hotline", - "pkg", - "vendor", - "version" - ] - - # parameters defined in pkginfo(5) that we always ignore. - # by default, we also ignore SUNW_* - ignored_keys = [ - "pstamp", - "pkginst", - "maxinst", - "classes", - "basedir", - "intonly", - "istates", - "order", - "rstates", - "ulimit", - # XXX pkg.sysvpkg adds this, ignoring for now. 
- "pkg.plat", - ] - ignored_keys.extend(legacy_keys) - - actions = [] - for key in pkginfo: - if not pkginfo[key]: - continue - - name = key.lower() - if name in ignored_keys or "SUNW_" in key: - continue - name = "pkg.send.convert.{0}".format(name) - name = name.replace("_", "-") - actions.append(AttributeAction(name=name, - value=pkginfo[key])) - - legacy_attrs = {} - for key in pkginfo: - name = key.lower() - if name in legacy_keys: - name = name.replace("_", "-") - legacy_attrs[name] = pkginfo[key] - - actions.append(LegacyAction(**legacy_attrs)) - - if "DESC" in pkginfo: - actions.append(AttributeAction(name="pkg.description", - value=pkginfo["DESC"])) - if "NAME" in pkginfo: - actions.append(AttributeAction(name="pkg.summary", - value=pkginfo["NAME"])) - if self.hollow: - for act in actions: - act.attrs[self.hollow_attr] = "true" - - return actions + self.scripts.add(script) + return None + else: + act = unknown.UnknownAction(path=mapline.pathname) + + if self.hollow and act: + act.attrs["pkg.send.convert.sunw-pkg-hollow"] = "true" + return act + + def get_pkginfo_actions(self, pkginfo): + """Creates a list of pkg.action.AttributeActions corresponding + to pkginfo fields that aren't directly mapped to pkg(7) + equivalents.""" + + # these keys get converted to a legacy action + legacy_keys = [ + "arch", + "category", + "name", + "desc", + "hotline", + "pkg", + "vendor", + "version", + ] + + # parameters defined in pkginfo(5) that we always ignore. + # by default, we also ignore SUNW_* + ignored_keys = [ + "pstamp", + "pkginst", + "maxinst", + "classes", + "basedir", + "intonly", + "istates", + "order", + "rstates", + "ulimit", + # XXX pkg.sysvpkg adds this, ignoring for now. + "pkg.plat", + ] + ignored_keys.extend(legacy_keys) + + actions = [] + for key in pkginfo: + if not pkginfo[key]: + continue + + name = key.lower() + if name in ignored_keys or "SUNW_" in key: + continue + name = "pkg.send.convert.{0}".format(name) + name = name.replace("_", "-") + actions.append(AttributeAction(name=name, value=pkginfo[key])) + + legacy_attrs = {} + for key in pkginfo: + name = key.lower() + if name in legacy_keys: + name = name.replace("_", "-") + legacy_attrs[name] = pkginfo[key] + + actions.append(LegacyAction(**legacy_attrs)) + + if "DESC" in pkginfo: + actions.append( + AttributeAction(name="pkg.description", value=pkginfo["DESC"]) + ) + if "NAME" in pkginfo: + actions.append( + AttributeAction(name="pkg.summary", value=pkginfo["NAME"]) + ) + if self.hollow: + for act in actions: + act.attrs[self.hollow_attr] = "true" + + return actions + def test(filename): - if os.path.isfile(os.path.join(filename, "pkginfo")) and \ - os.path.isfile(os.path.join(filename, "pkgmap")): - return True + if os.path.isfile(os.path.join(filename, "pkginfo")) and os.path.isfile( + os.path.join(filename, "pkgmap") + ): + return True + + return False - return False # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/bundle/TarBundle.py b/src/modules/bundle/TarBundle.py index 3aed7d26f..1103bf4a3 100644 --- a/src/modules/bundle/TarBundle.py +++ b/src/modules/bundle/TarBundle.py @@ -31,53 +31,60 @@ import pkg.misc as misc from pkg.actions import * + class TarBundle(pkg.bundle.Bundle): + def __init__(self, filename, **kwargs): + # XXX This could be more intelligent. Or get user input. Or + # extend API to take FMRI. 
+ filename = os.path.normpath(filename) + self.tf = tarfile.open(filename) + self.filename = filename + self.pkgname = os.path.basename(filename) + self.pkg = None - def __init__(self, filename, **kwargs): - # XXX This could be more intelligent. Or get user input. Or - # extend API to take FMRI. - filename = os.path.normpath(filename) - self.tf = tarfile.open(filename) - self.filename = filename - self.pkgname = os.path.basename(filename) - self.pkg = None + def __del__(self): + self.tf.close() - def __del__(self): - self.tf.close() + def _walk_bundle(self): + for f in self.tf: + yield f.name, (self.tf, f) - def _walk_bundle(self): - for f in self.tf: - yield f.name, (self.tf, f) + def __iter__(self): + for path, data in self._walk_bundle(): + yield self.action(*data) - def __iter__(self): - for path, data in self._walk_bundle(): - yield self.action(*data) + def action(self, tarfile, tarinfo): + if tarinfo.isreg(): + # false positive + # file-builtin; pylint: disable=W1607 + return file.FileAction( + tarfile.extractfile(tarinfo), + mode=oct(stat.S_IMODE(tarinfo.mode)), + owner=tarinfo.uname, + group=tarinfo.gname, + path=tarinfo.name, + timestamp=misc.time_to_timestamp(tarinfo.mtime), + ) + elif tarinfo.isdir(): + return directory.DirectoryAction( + mode=oct(stat.S_IMODE(tarinfo.mode)), + owner=tarinfo.uname, + group=tarinfo.gname, + path=tarinfo.name, + ) + elif tarinfo.issym(): + return link.LinkAction(path=tarinfo.name, target=tarinfo.linkname) + elif tarinfo.islnk(): + return hardlink.HardLinkAction( + path=tarinfo.name, target=tarinfo.linkname + ) + else: + return unknown.UnknownAction(path=tarinfo.name) - def action(self, tarfile, tarinfo): - if tarinfo.isreg(): - # false positive - # file-builtin; pylint: disable=W1607 - return file.FileAction(tarfile.extractfile(tarinfo), - mode=oct(stat.S_IMODE(tarinfo.mode)), - owner=tarinfo.uname, group=tarinfo.gname, - path=tarinfo.name, - timestamp=misc.time_to_timestamp(tarinfo.mtime)) - elif tarinfo.isdir(): - return directory.DirectoryAction( - mode=oct(stat.S_IMODE(tarinfo.mode)), - owner=tarinfo.uname, group=tarinfo.gname, - path=tarinfo.name) - elif tarinfo.issym(): - return link.LinkAction(path=tarinfo.name, - target=tarinfo.linkname) - elif tarinfo.islnk(): - return hardlink.HardLinkAction(path=tarinfo.name, - target=tarinfo.linkname) - else: - return unknown.UnknownAction(path=tarinfo.name) def test(filename): - return tarfile.is_tarfile(filename) + return tarfile.is_tarfile(filename) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/bundle/__init__.py b/src/modules/bundle/__init__.py index c9760f784..208d11045 100644 --- a/src/modules/bundle/__init__.py +++ b/src/modules/bundle/__init__.py @@ -33,84 +33,90 @@ "SolarisPackageDirBundle", "DirectoryBundle", "SolarisPackageDatastreamBundle", - "TarBundle" + "TarBundle", ] import os import sys + class InvalidBundleException(Exception): - pass + pass class InvalidOwnershipException(InvalidBundleException): - """Raised when gid or uid lookup fails for a file during bundle - generation.""" - - def __init__(self, fname, uid=None, gid=None): - InvalidBundleException.__init__(self) - self.fname = fname - msg = [] - if uid: - msg.append("UID {uid} is not associated with a user " - "name (file: {fname})".format(uid=uid, fname=fname)) - if gid: - msg.append("GID {gid} is not associated with a group " - "name (file: {fname})".format(gid=gid, fname=fname)) - self.msg = '\n'.join(msg) - - def __str__(self): - return self.msg + """Raised when gid or 
uid lookup fails for a file during bundle + generation.""" + + def __init__(self, fname, uid=None, gid=None): + InvalidBundleException.__init__(self) + self.fname = fname + msg = [] + if uid: + msg.append( + "UID {uid} is not associated with a user " + "name (file: {fname})".format(uid=uid, fname=fname) + ) + if gid: + msg.append( + "GID {gid} is not associated with a group " + "name (file: {fname})".format(gid=gid, fname=fname) + ) + self.msg = "\n".join(msg) + + def __str__(self): + return self.msg class Bundle(object): - """Base bundle class.""" - - def get_action(self, path): - """Return the first action that matches the provided path or - None.""" - for apath, data in self._walk_bundle(): - if not apath: - continue - npath = apath.lstrip(os.path.sep) - if path == npath: - if type(data) == tuple: - # Construct action on demand. - return self.action(*data) - # Action was returned. - return data + """Base bundle class.""" + + def get_action(self, path): + """Return the first action that matches the provided path or + None.""" + for apath, data in self._walk_bundle(): + if not apath: + continue + npath = apath.lstrip(os.path.sep) + if path == npath: + if type(data) == tuple: + # Construct action on demand. + return self.action(*data) + # Action was returned. + return data + def make_bundle(filename, **kwargs): - """Determines what kind of bundle is at the given filename, and returns - the appropriate bundle object. - """ + """Determines what kind of bundle is at the given filename, and returns + the appropriate bundle object. + """ - for btype in __all__: - bname = "pkg.bundle.{0}".format(btype) - bmodule = __import__(bname) + for btype in __all__: + bname = "pkg.bundle.{0}".format(btype) + bmodule = __import__(bname) - bmodule = sys.modules[bname] - if bmodule.test(filename): - bundle_create = getattr(bmodule, btype) - return bundle_create(filename, **kwargs) + bmodule = sys.modules[bname] + if bmodule.test(filename): + bundle_create = getattr(bmodule, btype) + return bundle_create(filename, **kwargs) - raise TypeError("Unknown bundle type for '{0}'".format(filename)) + raise TypeError("Unknown bundle type for '{0}'".format(filename)) if __name__ == "__main__": + try: + b = make_bundle(sys.argv[1]) + except TypeError as e: + print(e) + sys.exit(1) + + for file in b: + print(file.type, file.attrs) try: - b = make_bundle(sys.argv[1]) - except TypeError as e: - print(e) - sys.exit(1) - - for file in b: - print(file.type, file.attrs) - try: - print(file.attrs["file"]) - print(os.stat(file.attrs["file"])) - except: - pass + print(file.attrs["file"]) + print(os.stat(file.attrs["file"])) + except: + pass # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/catalog.py b/src/modules/catalog.py index 48a973f49..c1222c269 100644 --- a/src/modules/catalog.py +++ b/src/modules/catalog.py @@ -25,7 +25,7 @@ """Interfaces and implementation for the Catalog object, as well as functions that operate on lists of package FMRIs.""" -from __future__ import print_function +from __future__ import print_function import copy import calendar import collections @@ -53,3960 +53,4037 @@ from pkg.misc import EmptyDict, EmptyI -FEATURE_UTF8 = 'ooce:utf8' +FEATURE_UTF8 = "ooce:utf8" + class _JSONWriter(object): - """Private helper class used to serialize catalog data and generate - signatures.""" - - def __init__(self, data, pathname=None, sign=True): - self.__data = data - self.__fileobj = None - - # catalog signatures *must* use sha-1 only since clients - # compare entire 
dictionaries against the reported hash from - # the catalog in the various .validate() - # methods rather than just attributes within those dictionaries. - # If old clients are to interoperate with new repositories, the - # computed and expected dictionaries must be identical at - # present, so we must use sha-1. - if sign: - if not pathname: - # Only needed if not writing to __fileobj. - self.__sha_1 = hashlib.sha1() - self.__sha_1_value = None - - self.__sign = sign - self.pathname = pathname - - if not pathname: - return + """Private helper class used to serialize catalog data and generate + signatures.""" + + def __init__(self, data, pathname=None, sign=True): + self.__data = data + self.__fileobj = None + + # catalog signatures *must* use sha-1 only since clients + # compare entire dictionaries against the reported hash from + # the catalog in the various .validate() + # methods rather than just attributes within those dictionaries. + # If old clients are to interoperate with new repositories, the + # computed and expected dictionaries must be identical at + # present, so we must use sha-1. + if sign: + if not pathname: + # Only needed if not writing to __fileobj. + self.__sha_1 = hashlib.sha1() + self.__sha_1_value = None + + self.__sign = sign + self.pathname = pathname + + if not pathname: + return + + try: + tfile = open(pathname, "wb") + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + raise + self.__fileobj = tfile + + def signatures(self): + """Returns a dictionary mapping digest algorithms to the + hex-encoded digest values of the text of the catalog.""" + + if not self.__sign: + return {} + return {"sha-1": self.__sha_1_value} + + def _feature(self, feature): + return "_FEATURE" in self.__data and feature in self.__data["_FEATURE"] + + def _dump( + self, + obj, + fp, + skipkeys=False, + ensure_ascii=True, + allow_nan=True, + indent=None, + default=None, + **kw, + ): + if not self._feature(FEATURE_UTF8): + kw["ensure_ascii"] = True + json.dump(obj=obj, stream=fp, indent=None, **kw) + + def save(self): + """Serializes and stores the provided data in JSON format.""" + + # sort_keys is necessary to ensure consistent signature + # generation. It has a minimal performance cost as well (on + # on SPARC and x86), so shouldn't be an issue. However, it + # is only needed if the caller has indicated that the content + # should be signed. + + # Whenever possible, avoid using the write wrapper (self) as + # this can greatly increase write times. + out = self.__fileobj + if not out: + out = self + + self._dump(self.__data, out, sort_keys=self.__sign) + out.write(b"\n") + + if self.__fileobj: + self.__fileobj.close() + + if not self.__sign or not self.__fileobj: + # Can't sign unless a file object is provided. And if + # one is provided, but no signing is to be done, then + # ensure the fileobject is discarded. + self.__fileobj = None + if self.__sign: + self.__sha_1_value = self.__sha_1.hexdigest() + return + + # Ensure file object goes out of scope. + self.__fileobj = None + + # Calculating sha-1 this way is much faster than intercepting + # write calls because of the excessive number of write calls + # that json.dump() triggers (1M+ for /dev catalog files). + self.__sha_1_value = misc.get_data_digest( + self.pathname, hash_func=hashlib.sha1 + )[0] + + # Open the JSON file so that the signature data can be added. 
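A rough, self-contained sketch of the splice performed below, using a throw-away file (the filename and payload are made up): the serialized catalog ends with "}\n", so the writer seeks back two bytes, appends the _SIGNATURE member, and re-closes the object.

    import hashlib
    import json
    import os

    data = {"example": 1}
    with open("demo-catalog.json", "wb") as f:
        f.write(json.dumps(data, sort_keys=True).encode("utf-8") + b"\n")

    with open("demo-catalog.json", "rb") as f:
        digest = hashlib.sha1(f.read()).hexdigest()    # digest of the unsigned text

    with open("demo-catalog.json", "rb+") as f:
        f.seek(-2, os.SEEK_END)                        # sit just before '}\n'
        f.write(b',"_SIGNATURE":'                      # non-empty object, so a comma is needed
                + json.dumps({"sha-1": digest}).encode("utf-8") + b"}\n")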
+ with open(self.pathname, "rb+") as sfile: + # The last bytes should be "}\n", which is where the + # signature data structure needs to be appended. + sfile.seek(-2, os.SEEK_END) + + # Add the signature data and close. + sfoffset = sfile.tell() + if sfoffset > 1: + # Catalog is not empty, so a separator is + # needed. + sfile.write(b",") + sfile.write(b'"_SIGNATURE":') + self._dump(self.signatures(), sfile) + sfile.write(b"}\n") + + def write(self, data): + """Wrapper function that should not be called by external + consumers.""" + + if self.__sign: + self.__sha_1.update(data) + + def writelines(self, iterable): + """Wrapper function that should not be called by external + consumers.""" + + for l in iterable: + self.__sha_1.update(l) + + def __str__(self): + if self.pathname: + return "JSONWriter to {}".format(self.pathname) + return "JSONWriter to memory" - try: - tfile = open(pathname, "wb") - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - raise - self.__fileobj = tfile - - def signatures(self): - """Returns a dictionary mapping digest algorithms to the - hex-encoded digest values of the text of the catalog.""" - - if not self.__sign: - return {} - return { "sha-1": self.__sha_1_value } - - def _feature(self, feature): - return ('_FEATURE' in self.__data - and feature in self.__data['_FEATURE']) - - def _dump(self, obj, fp, skipkeys=False, ensure_ascii=True, - allow_nan=True, indent=None, default=None, **kw): - - if not self._feature(FEATURE_UTF8): - kw['ensure_ascii'] = True - json.dump(obj=obj, stream=fp, indent=None, **kw) - - def save(self): - """Serializes and stores the provided data in JSON format.""" - - # sort_keys is necessary to ensure consistent signature - # generation. It has a minimal performance cost as well (on - # on SPARC and x86), so shouldn't be an issue. However, it - # is only needed if the caller has indicated that the content - # should be signed. - - # Whenever possible, avoid using the write wrapper (self) as - # this can greatly increase write times. - out = self.__fileobj - if not out: - out = self - - self._dump(self.__data, out, sort_keys=self.__sign) - out.write(b"\n") - - if self.__fileobj: - self.__fileobj.close() - - if not self.__sign or not self.__fileobj: - # Can't sign unless a file object is provided. And if - # one is provided, but no signing is to be done, then - # ensure the fileobject is discarded. - self.__fileobj = None - if self.__sign: - self.__sha_1_value = self.__sha_1.hexdigest() - return - - # Ensure file object goes out of scope. - self.__fileobj = None - - # Calculating sha-1 this way is much faster than intercepting - # write calls because of the excessive number of write calls - # that json.dump() triggers (1M+ for /dev catalog files). - self.__sha_1_value = misc.get_data_digest(self.pathname, - hash_func=hashlib.sha1)[0] - - # Open the JSON file so that the signature data can be added. - with open(self.pathname, "rb+") as sfile: - # The last bytes should be "}\n", which is where the - # signature data structure needs to be appended. - sfile.seek(-2, os.SEEK_END) - - # Add the signature data and close. - sfoffset = sfile.tell() - if sfoffset > 1: - # Catalog is not empty, so a separator is - # needed. 
- sfile.write(b",") - sfile.write(b'"_SIGNATURE":') - self._dump(self.signatures(), sfile) - sfile.write(b"}\n") - - def write(self, data): - """Wrapper function that should not be called by external - consumers.""" - - if self.__sign: - self.__sha_1.update(data) - - def writelines(self, iterable): - """Wrapper function that should not be called by external - consumers.""" - - for l in iterable: - self.__sha_1.update(l) - - def __str__(self): - if self.pathname: - return 'JSONWriter to {}'.format(self.pathname) - return 'JSONWriter to memory' class CatalogPartBase(object): - """A CatalogPartBase object is an abstract class containing core - functionality shared between CatalogPart and CatalogAttrs.""" - - # The file mode to be used for all catalog files. - __file_mode = stat.S_IRUSR|stat.S_IWUSR|stat.S_IRGRP|stat.S_IROTH - - __meta_root = None - last_modified = None - loaded = False - name = None - sign = True - signatures = None - features = None - - def __init__(self, name, meta_root=None, sign=True): - """Initializes a CatalogPartBase object.""" - - self.meta_root = meta_root - # Sanity check: part names can't be pathname-ish. - if name != os.path.basename(name): - raise UnrecognizedCatalogPart(name) - self.name = name - self.sign = sign - self.signatures = {} - self.features = [] - - if not self.meta_root or not self.exists: - # Operations shouldn't attempt to load the part data - # unless meta_root is defined and the data exists. - self.loaded = True - self.last_modified = datetime.datetime.utcnow() - else: - self.last_modified = self.__last_modified() + """A CatalogPartBase object is an abstract class containing core + functionality shared between CatalogPart and CatalogAttrs.""" + + # The file mode to be used for all catalog files. + __file_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + __meta_root = None + last_modified = None + loaded = False + name = None + sign = True + signatures = None + features = None + + def __init__(self, name, meta_root=None, sign=True): + """Initializes a CatalogPartBase object.""" + + self.meta_root = meta_root + # Sanity check: part names can't be pathname-ish. + if name != os.path.basename(name): + raise UnrecognizedCatalogPart(name) + self.name = name + self.sign = sign + self.signatures = {} + self.features = [] + + if not self.meta_root or not self.exists: + # Operations shouldn't attempt to load the part data + # unless meta_root is defined and the data exists. 
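For reference, the symbolic mode used for catalog files above is ordinary 0644; a quick illustrative check:

    import stat
    assert (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) == 0o644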
+ self.loaded = True + self.last_modified = datetime.datetime.utcnow() + else: + self.last_modified = self.__last_modified() - @staticmethod - def _gen_signatures(data): - f = _JSONWriter(data) - f.save() - return f.signatures() + @staticmethod + def _gen_signatures(data): + f = _JSONWriter(data) + f.save() + return f.signatures() - def __get_meta_root(self): - return self.__meta_root + def __get_meta_root(self): + return self.__meta_root - def __last_modified(self): - """A UTC datetime object representing the time the file used to - to store object metadata was modified, or None if it does not - exist yet.""" + def __last_modified(self): + """A UTC datetime object representing the time the file used to + to store object metadata was modified, or None if it does not + exist yet.""" - if not self.exists: - return None + if not self.exists: + return None - try: - mod_time = os.stat(self.pathname).st_mtime - except EnvironmentError as e: - if e.errno == errno.ENOENT: - return None - raise - return datetime.datetime.utcfromtimestamp(mod_time) - - def __set_meta_root(self, path): - if path: - path = os.path.abspath(path) - self.__meta_root = path - - def destroy(self): - """Removes any on-disk files that exist for the catalog part and - discards all content.""" - - if self.pathname: - if os.path.exists(self.pathname): - try: - portable.remove(self.pathname) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - raise - self.signatures = {} - self.features = [] - self.loaded = False - self.last_modified = None - - @property - def exists(self): - """A boolean value indicating wheher a file for the catalog part - exists at /.""" - - if not self.pathname: - return False - return os.path.exists(self.pathname) - - def load(self): - """Load the serialized data for the catalog part and return the - resulting structure.""" - - location = os.path.join(self.meta_root, self.name) + try: + mod_time = os.stat(self.pathname).st_mtime + except EnvironmentError as e: + if e.errno == errno.ENOENT: + return None + raise + return datetime.datetime.utcfromtimestamp(mod_time) - try: - fobj = open(location, "rb") - except EnvironmentError as e: - if e.errno == errno.ENOENT: - raise api_errors.RetrievalError(e, - location=location) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - raise + def __set_meta_root(self, path): + if path: + path = os.path.abspath(path) + self.__meta_root = path + + def destroy(self): + """Removes any on-disk files that exist for the catalog part and + discards all content.""" + if self.pathname: + if os.path.exists(self.pathname): try: - struct = json.load(fobj) + portable.remove(self.pathname) except EnvironmentError as e: - raise api_errors.RetrievalError(e) - except ValueError as e: - # Not a valid catalog file. 
- raise api_errors.InvalidCatalogFile(location) + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + raise + self.signatures = {} + self.features = [] + self.loaded = False + self.last_modified = None + + @property + def exists(self): + """A boolean value indicating wheher a file for the catalog part + exists at /.""" + + if not self.pathname: + return False + return os.path.exists(self.pathname) + + def load(self): + """Load the serialized data for the catalog part and return the + resulting structure.""" + + location = os.path.join(self.meta_root, self.name) - fobj.close() + try: + fobj = open(location, "rb") + except EnvironmentError as e: + if e.errno == errno.ENOENT: + raise api_errors.RetrievalError(e, location=location) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + raise - self.loaded = True - # Signature data, if present, should be removed from the struct - # on load and then stored in the signatures object property. - self.signatures = struct.pop("_SIGNATURE", {}) - if "_FEATURE" in struct: - self.features = struct["_FEATURE"] - return struct + try: + struct = json.load(fobj) + except EnvironmentError as e: + raise api_errors.RetrievalError(e) + except ValueError as e: + # Not a valid catalog file. + raise api_errors.InvalidCatalogFile(location) - @property - def pathname(self): - """The absolute path of the file used to store the data for - this part or None if meta_root or name is not set.""" + fobj.close() - if not self.meta_root or not self.name: - return None - return os.path.join(self.meta_root, self.name) + self.loaded = True + # Signature data, if present, should be removed from the struct + # on load and then stored in the signatures object property. + self.signatures = struct.pop("_SIGNATURE", {}) + if "_FEATURE" in struct: + self.features = struct["_FEATURE"] + return struct - def save(self, data): - """Serialize and store the transformed catalog part's 'data' in - a file using the pathname /. + @property + def pathname(self): + """The absolute path of the file used to store the data for + this part or None if meta_root or name is not set.""" - 'data' must be a dict.""" + if not self.meta_root or not self.name: + return None + return os.path.join(self.meta_root, self.name) - f = _JSONWriter(data, pathname=self.pathname, sign=self.sign) - f.save() + def save(self, data): + """Serialize and store the transformed catalog part's 'data' in + a file using the pathname /. - # Update in-memory copy to reflect stored data. - self.signatures = f.signatures() + 'data' must be a dict.""" - # Ensure the permissions on the new file are correct. - try: - os.chmod(self.pathname, self.__file_mode) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - raise - - # Finally, set the file times to match the last catalog change. 
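A hedged sketch of the mtime handling just below: a naive UTC datetime is converted to an epoch timestamp with calendar.timegm() and then stamped onto the file with os.utime(). The timestamp value and temporary file are only for illustration.

    import calendar
    import datetime
    import os
    import tempfile

    last_modified = datetime.datetime(2023, 1, 2, 3, 4, 5)     # made-up UTC time
    mtime = calendar.timegm(last_modified.utctimetuple())      # -> 1672628645

    with tempfile.NamedTemporaryFile(delete=False) as tf:
        path = tf.name
    os.utime(path, (mtime, mtime))
    assert int(os.stat(path).st_mtime) == mtime
    os.unlink(path)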
- if self.last_modified: - mtime = calendar.timegm( - self.last_modified.utctimetuple()) - os.utime(self.pathname, (mtime, mtime)) - - def set_feature(self, feature, state): - if state: - if feature not in self.features: - self.features.append(feature) - else: - if feature in self.features: - self.features.remove(feature) + f = _JSONWriter(data, pathname=self.pathname, sign=self.sign) + f.save() - def feature(self, feature): - return feature in self.features + # Update in-memory copy to reflect stored data. + self.signatures = f.signatures() - meta_root = property(__get_meta_root, __set_meta_root) + # Ensure the permissions on the new file are correct. + try: + os.chmod(self.pathname, self.__file_mode) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + raise + + # Finally, set the file times to match the last catalog change. + if self.last_modified: + mtime = calendar.timegm(self.last_modified.utctimetuple()) + os.utime(self.pathname, (mtime, mtime)) + + def set_feature(self, feature, state): + if state: + if feature not in self.features: + self.features.append(feature) + else: + if feature in self.features: + self.features.remove(feature) + def feature(self, feature): + return feature in self.features -class CatalogPart(CatalogPartBase): - """A CatalogPart object is the representation of a subset of the package - FMRIs available from a package repository.""" - - __data = None - ordered = None - - def __init__(self, name, meta_root=None, ordered=True, sign=True): - """Initializes a CatalogPart object.""" - - self.__data = {} - self.ordered = ordered - if not name.startswith("catalog."): - raise UnrecognizedCatalogPart(name) - CatalogPartBase.__init__(self, name, meta_root=meta_root, - sign=sign) - - def __iter_entries(self, last=False, ordered=False, pubs=EmptyI): - """Private generator function to iterate over catalog entries. - - 'last' is a boolean value that indicates only the last entry - for each package on a per-publisher basis should be returned. - As long as the CatalogPart has been saved since the last - modifying operation, or sort() has has been called, this will - also be the newest version of the package. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. 
- - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - self.load() - if ordered: - stems = self.pkg_names(pubs=pubs) - else: - stems = ( - (pub, stem) - for pub in self.publishers(pubs=pubs) - for stem in self.__data[pub] - ) + meta_root = property(__get_meta_root, __set_meta_root) - if last: - return ( - (pub, stem, self.__data[pub][stem][-1]) - for pub, stem in stems - ) - if ordered: - return ( - (pub, stem, entry) - for pub, stem in stems - for entry in reversed(self.__data[pub][stem]) - ) - return ( - (pub, stem, entry) - for pub, stem in stems - for entry in self.__data[pub][stem] +class CatalogPart(CatalogPartBase): + """A CatalogPart object is the representation of a subset of the package + FMRIs available from a package repository.""" + + __data = None + ordered = None + + def __init__(self, name, meta_root=None, ordered=True, sign=True): + """Initializes a CatalogPart object.""" + + self.__data = {} + self.ordered = ordered + if not name.startswith("catalog."): + raise UnrecognizedCatalogPart(name) + CatalogPartBase.__init__(self, name, meta_root=meta_root, sign=sign) + + def __iter_entries(self, last=False, ordered=False, pubs=EmptyI): + """Private generator function to iterate over catalog entries. + + 'last' is a boolean value that indicates only the last entry + for each package on a per-publisher basis should be returned. + As long as the CatalogPart has been saved since the last + modifying operation, or sort() has has been called, this will + also be the newest version of the package. + + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + self.load() + if ordered: + stems = self.pkg_names(pubs=pubs) + else: + stems = ( + (pub, stem) + for pub in self.publishers(pubs=pubs) + for stem in self.__data[pub] + ) + + if last: + return ( + (pub, stem, self.__data[pub][stem][-1]) for pub, stem in stems + ) + + if ordered: + return ( + (pub, stem, entry) + for pub, stem in stems + for entry in reversed(self.__data[pub][stem]) + ) + return ( + (pub, stem, entry) + for pub, stem in stems + for entry in self.__data[pub][stem] + ) + + def add( + self, + pfmri=None, + metadata=None, + op_time=None, + pub=None, + stem=None, + ver=None, + ): + """Add a catalog entry for a given FMRI or FMRI components. + + 'metadata' is an optional dict containing the catalog + metadata that should be stored for the specified FMRI. + + The dict representing the entry is returned to callers, + but should not be modified. + """ + + assert pfmri or (pub and stem and ver) + if pfmri and not pfmri.publisher: + raise api_errors.AnarchicalCatalogFMRI(str(pfmri)) + + if not self.loaded: + # Hot path, so avoid calling load unless necessary, even + # though it performs this check already. 
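A compact sketch of the in-memory layout that add() maintains below: a dict of publisher prefix to package stem to a list of per-version entry dicts. The publisher, stem, and version strings here are invented, and the helper is only a stand-in for the real method.

    data = {}

    def add_entry(pub, stem, ver, metadata=None):
        entry = metadata if metadata is not None else {}
        entry["version"] = ver
        data.setdefault(pub, {}).setdefault(stem, []).append(entry)
        return entry

    add_entry("example.com", "web/server/httpd", "2.4.57,5.11-151046.0")
    add_entry("example.com", "web/server/httpd", "2.4.58,5.11-151046.0")
    # data == {"example.com": {"web/server/httpd": [
    #             {"version": "2.4.57,5.11-151046.0"},
    #             {"version": "2.4.58,5.11-151046.0"}]}}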
+ self.load() + + if pfmri: + pub, stem, ver = pfmri.tuple() + ver = str(ver) + + pkg_list = self.__data.setdefault(pub, {}) + ver_list = pkg_list.setdefault(stem, []) + for entry in ver_list: + if entry["version"] == ver: + if not pfmri: + pfmri = "pkg://{0}/{1}@{2}".format(pub, stem, ver) + raise api_errors.DuplicateCatalogEntry( + pfmri, operation="add", catalog_name=self.pathname ) - def add(self, pfmri=None, metadata=None, op_time=None, pub=None, - stem=None, ver=None): - """Add a catalog entry for a given FMRI or FMRI components. - - 'metadata' is an optional dict containing the catalog - metadata that should be stored for the specified FMRI. - - The dict representing the entry is returned to callers, - but should not be modified. - """ - - assert pfmri or (pub and stem and ver) - if pfmri and not pfmri.publisher: - raise api_errors.AnarchicalCatalogFMRI(str(pfmri)) - - if not self.loaded: - # Hot path, so avoid calling load unless necessary, even - # though it performs this check already. - self.load() - - if pfmri: - pub, stem, ver = pfmri.tuple() - ver = str(ver) - - pkg_list = self.__data.setdefault(pub, {}) - ver_list = pkg_list.setdefault(stem, []) - for entry in ver_list: - if entry["version"] == ver: - if not pfmri: - pfmri = "pkg://{0}/{1}@{2}".format(pub, - stem, ver) - raise api_errors.DuplicateCatalogEntry( - pfmri, operation="add", - catalog_name=self.pathname) - - if metadata is not None: - entry = metadata - else: - entry = {} - entry["version"] = ver - - ver_list.append(entry) - if self.ordered: - self.sort(pfmris=set([pfmri])) - - if not op_time: - op_time = datetime.datetime.utcnow() - self.last_modified = op_time - self.signatures = {} + if metadata is not None: + entry = metadata + else: + entry = {} + entry["version"] = ver + + ver_list.append(entry) + if self.ordered: + self.sort(pfmris=set([pfmri])) + + if not op_time: + op_time = datetime.datetime.utcnow() + self.last_modified = op_time + self.signatures = {} + return entry + + def destroy(self): + """Removes any on-disk files that exist for the catalog part and + discards all content.""" + + self.__data = {} + return CatalogPartBase.destroy(self) + + def entries(self, cb=None, last=False, ordered=False, pubs=EmptyI): + """A generator function that produces tuples of the form + (fmri, entry) as it iterates over the contents of the catalog + part (where entry is the related catalog entry for the fmri). + Callers should not modify any of the data that is returned. + + 'cb' is an optional callback function that will be executed for + each package. It must accept two arguments: 'pkg' and 'entry'. + 'pkg' is an FMRI object and 'entry' is the dictionary structure + of the catalog entry for the package. If the callback returns + False, then the entry will not be included in the results. + + 'last' is a boolean value that indicates only the last entry + for each package on a per-publisher basis should be returned. + As long as the CatalogPart has been saved since the last + modifying operation, or sort() has has been called, this will + also be the newest version of the package. + + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to. + + Results are always in catalog version order on a per- + publisher, per-stem basis. 
+ """ + + for pub, stem, entry in self.__iter_entries( + last=last, ordered=ordered, pubs=pubs + ): + f = fmri.PkgFmri(name=stem, publisher=pub, version=entry["version"]) + if cb is None or cb(f, entry): + yield f, entry + + def entries_by_version(self, name, pubs=EmptyI): + """A generator function that produces tuples of (version, + entries), where entries is a list of tuples of the format + (fmri, entry) where entry is the catalog entry for the + FMRI) as it iterates over the CatalogPart contents. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + self.load() + + versions = {} + entries = {} + for pub in self.publishers(pubs=pubs): + ver_list = self.__data[pub].get(name, ()) + for entry in ver_list: + sver = entry["version"] + pfmri = fmri.PkgFmri(name=name, publisher=pub, version=sver) + + versions[sver] = pfmri.version + entries.setdefault(sver, []) + entries[sver].append((pfmri, entry)) + + for key, ver in sorted(six.iteritems(versions), key=itemgetter(1)): + yield ver, entries[key] + + def fmris(self, last=False, objects=True, ordered=False, pubs=EmptyI): + """A generator function that produces FMRIs as it iterates + over the contents of the catalog part. + + 'last' is a boolean value that indicates only the last fmri + for each package on a per-publisher basis should be returned. + As long as the CatalogPart has been saved since the last + modifying operation, or sort() has has been called, this will + also be the newest version of the package. + + 'objects' is an optional boolean value indicating whether + FMRIs should be returned as FMRI objects or as strings. + + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to. + + Results are always in catalog version order on a per- + publisher, per-stem basis.""" + + if objects: + for pub, stem, entry in self.__iter_entries( + last=last, ordered=ordered, pubs=pubs + ): + yield fmri.PkgFmri( + name=stem, publisher=pub, version=entry["version"] + ) + return + + for pub, stem, entry in self.__iter_entries( + last=last, ordered=ordered, pubs=pubs + ): + yield "pkg://{0}/{1}@{2}".format(pub, stem, entry["version"]) + return + + def fmris_by_version(self, name, pubs=EmptyI): + """A generator function that produces tuples of (version, + fmris), where fmris is a list of the fmris related to the + version. 
+ + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + self.load() + + versions = {} + entries = {} + for pub in self.publishers(pubs=pubs): + ver_list = self.__data[pub].get(name, None) + if not ver_list: + continue + + for entry in ver_list: + sver = entry["version"] + pfmri = fmri.PkgFmri(name=name, publisher=pub, version=sver) + + versions[sver] = pfmri.version + entries.setdefault(sver, []) + entries[sver].append(pfmri) + + for key, ver in sorted(six.iteritems(versions), key=itemgetter(1)): + yield ver, entries[key] + + def get_entry(self, pfmri=None, pub=None, stem=None, ver=None): + """Returns the catalog part entry for the given package FMRI or + FMRI components.""" + + assert pfmri or (pub and stem and ver) + if pfmri and not pfmri.publisher: + raise api_errors.AnarchicalCatalogFMRI(str(pfmri)) + + # Since this is a hot path, this function checks for loaded + # status before attempting to call the load function. + if not self.loaded: + self.load() + + if pfmri: + pub, stem, ver = pfmri.tuple() + ver = str(ver) + + pkg_list = self.__data.get(pub, None) + if not pkg_list: + return + + ver_list = pkg_list.get(stem, ()) + for entry in ver_list: + if entry["version"] == ver: return entry - def destroy(self): - """Removes any on-disk files that exist for the catalog part and - discards all content.""" - - self.__data = {} - return CatalogPartBase.destroy(self) - - def entries(self, cb=None, last=False, ordered=False, pubs=EmptyI): - """A generator function that produces tuples of the form - (fmri, entry) as it iterates over the contents of the catalog - part (where entry is the related catalog entry for the fmri). - Callers should not modify any of the data that is returned. - - 'cb' is an optional callback function that will be executed for - each package. It must accept two arguments: 'pkg' and 'entry'. - 'pkg' is an FMRI object and 'entry' is the dictionary structure - of the catalog entry for the package. If the callback returns - False, then the entry will not be included in the results. - - 'last' is a boolean value that indicates only the last entry - for each package on a per-publisher basis should be returned. - As long as the CatalogPart has been saved since the last - modifying operation, or sort() has has been called, this will - also be the newest version of the package. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to. - - Results are always in catalog version order on a per- - publisher, per-stem basis. - """ - - for pub, stem, entry in self.__iter_entries(last=last, - ordered=ordered, pubs=pubs): - f = fmri.PkgFmri(name=stem, publisher=pub, - version=entry["version"]) - if cb is None or cb(f, entry): - yield f, entry - - def entries_by_version(self, name, pubs=EmptyI): - """A generator function that produces tuples of (version, - entries), where entries is a list of tuples of the format - (fmri, entry) where entry is the catalog entry for the - FMRI) as it iterates over the CatalogPart contents. 
- - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - self.load() - - versions = {} - entries = {} - for pub in self.publishers(pubs=pubs): - ver_list = self.__data[pub].get(name, ()) - for entry in ver_list: - sver = entry["version"] - pfmri = fmri.PkgFmri(name=name, publisher=pub, - version=sver) - - versions[sver] = pfmri.version - entries.setdefault(sver, []) - entries[sver].append((pfmri, entry)) - - for key, ver in sorted(six.iteritems(versions), key=itemgetter(1)): - yield ver, entries[key] - - def fmris(self, last=False, objects=True, ordered=False, pubs=EmptyI): - """A generator function that produces FMRIs as it iterates - over the contents of the catalog part. - - 'last' is a boolean value that indicates only the last fmri - for each package on a per-publisher basis should be returned. - As long as the CatalogPart has been saved since the last - modifying operation, or sort() has has been called, this will - also be the newest version of the package. - - 'objects' is an optional boolean value indicating whether - FMRIs should be returned as FMRI objects or as strings. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to. - - Results are always in catalog version order on a per- - publisher, per-stem basis.""" - - if objects: - for pub, stem, entry in self.__iter_entries(last=last, - ordered=ordered, pubs=pubs): - yield fmri.PkgFmri(name=stem, publisher=pub, - version=entry["version"]) - return - - for pub, stem, entry in self.__iter_entries(last=last, - ordered=ordered, pubs=pubs): - yield "pkg://{0}/{1}@{2}".format(pub, - stem, entry["version"]) - return - - def fmris_by_version(self, name, pubs=EmptyI): - """A generator function that produces tuples of (version, - fmris), where fmris is a list of the fmris related to the - version. + def get_package_counts(self): + """Returns a tuple of integer values (package_count, + package_version_count). The first is the number of + unique packages (per-publisher), and the second is the + number of unique package versions (per-publisher and + stem).""" + + self.load() + package_count = 0 + package_version_count = 0 + for pub in self.publishers(): + for stem in self.__data[pub]: + package_count += 1 + package_version_count += len(self.__data[pub][stem]) + return (package_count, package_version_count) + + def get_package_counts_by_pub(self, pubs=EmptyI): + """Returns a generator of tuples of the form (pub, + package_count, package_version_count). 'pub' is the publisher + prefix, 'package_count' is the number of unique packages for the + publisher, and 'package_version_count' is the number of unique + package versions for the publisher. + """ + + self.load() + for pub in self.publishers(pubs=pubs): + package_count = 0 + package_version_count = 0 + for stem in self.__data[pub]: + package_count += 1 + package_version_count += len(self.__data[pub][stem]) + yield pub, package_count, package_version_count + + def load(self): + """Load and transform the catalog part's data, preparing it + for use.""" + + if self.loaded: + # Already loaded, or only in-memory. + return + self.__data = CatalogPartBase.load(self) + + def names(self, pubs=EmptyI): + """Returns a set containing the names of all the packages in + the CatalogPart. 
+ + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + self.load() + return set( + ( + stem + for pub in self.publishers(pubs=pubs) + for stem in self.__data[pub] + ) + ) + + def pkg_names(self, pubs=EmptyI): + """A generator function that produces package tuples of the form + (pub, stem) as it iterates over the contents of the CatalogPart. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to. If specified, publishers will be sorted in + the order given. + + Results are always returned sorted by stem and then by + publisher.""" + + self.load() + + # Results have to be sorted by stem first, and by + # publisher prefix second. + pkg_list = [ + "{0}!{1}".format(stem, pub) + for pub in self.publishers(pubs=pubs) + for stem in self.__data[pub] + ] + + pub_sort = None + if pubs: + pos = dict((p, i) for (i, p) in enumerate(pubs)) + + def pub_key(a): + astem, apub = a.split("!", 1) + return (astem, pos[apub]) + + pub_sort = pub_key + + for entry in sorted(pkg_list, key=pub_sort): + stem, pub = entry.split("!", 1) + yield pub, stem + + def publishers(self, pubs=EmptyI): + """A generator function that returns publisher prefixes as it + iterates over the package data in the CatalogPart. + + 'pubs' is an optional list that contains the prefixes of the + publishers to restrict the results to.""" + + self.load() + for pub in self.__data: + # Any entries starting with "_" are part of the + # reserved catalog namespace. + if not pub[0] == "_" and (not pubs or pub in pubs): + yield pub + + def remove(self, pfmri, op_time=None): + """Remove a package and its metadata.""" + + if not pfmri.publisher: + raise api_errors.AnarchicalCatalogFMRI(pfmri.get_fmri()) + + self.load() + pkg_list = self.__data.get(pfmri.publisher, None) + if not pkg_list: + raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + + ver = str(pfmri.version) + ver_list = pkg_list.get(pfmri.pkg_name, []) + for i, entry in enumerate(ver_list): + if entry["version"] == ver: + # Safe to do this since a 'break' is done + # immediately after removals are performed. + del ver_list[i] + if not ver_list: + # When all version entries for a + # package are removed, its stem + # should be also. + del pkg_list[pfmri.pkg_name] + if not pkg_list: + # When all package stems for a + # publisher have been removed, + # it should be also. + del self.__data[pfmri.publisher] + break + else: + raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + + if not op_time: + op_time = datetime.datetime.utcnow() + self.last_modified = op_time + self.signatures = {} + + def save(self): + """Transform and store the catalog part's data in a file using + the pathname /.""" + + if not self.meta_root: + # Assume this is in-memory only. + return + + # Ensure content is loaded before attempting save. + self.load() + + if len(self.features): + self.__data["_FEATURE"] = self.features + CatalogPartBase.save(self, self.__data) + + def sort(self, pfmris=None, pubs=None): + """Re-sorts the contents of the CatalogPart such that version + entries for each package stem are in ascending order. + + 'pfmris' is an optional set of FMRIs to restrict the sort to. + This is useful during catalog operations as only entries for + the corresponding package stem(s) need to be sorted. + + 'pubs' is an optional set of publisher prefixes to restrict + the sort to. This is useful during catalog operations as only + entries for the corresponding publisher stem(s) need to be + sorted. 
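# Standalone illustration (not part of the change) of the "stem!pub" sort key
# used by pkg_names() above: entries are encoded as "stem!publisher" strings so
# results sort by stem first and, when an explicit publisher order is given,
# by that order second.  The sample data below is made up.
pkg_list = ["web/server!extra", "web/server!omnios", "editor/vim!omnios"]
pubs = ["omnios", "extra"]          # caller-supplied publisher precedence

pos = {p: i for i, p in enumerate(pubs)}

def pub_key(item):
    stem, pub = item.split("!", 1)
    return (stem, pos[pub])

for entry in sorted(pkg_list, key=pub_key):
    stem, pub = entry.split("!", 1)
    print(pub, stem)
# editor/vim sorts first; for web/server, "omnios" precedes "extra".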
This option has no effect if 'pfmris' is also + provided. + + If neither 'pfmris' or 'pubs' is provided, all entries will be + sorted.""" + + def key_func(item): + return pkg.version.Version(item["version"]) + + self.load() + if pfmris is not None: + processed = set() + for f in pfmris: + pkg_stem = f.get_pkg_stem() + if pkg_stem in processed: + continue + processed.add(pkg_stem) + + # The specified FMRI may not exist in this + # CatalogPart, so continue if it does not + # exist. + pkg_list = self.__data.get(f.publisher, None) + if pkg_list: + ver_list = pkg_list.get(f.pkg_name, None) + if ver_list: + ver_list.sort(key=key_func) + return + + for pub in self.publishers(pubs=pubs): + for stem in self.__data[pub]: + self.__data[pub][stem].sort(key=key_func) + + def tuples(self, last=False, ordered=False, pubs=EmptyI): + """A generator function that produces FMRI tuples as it + iterates over the contents of the catalog part. + + 'last' is a boolean value that indicates only the last FMRI + tuple for each package on a per-publisher basis should be + returned. As long as the CatalogPart has been saved since + the last modifying operation, or sort() has has been called, + this will also be the newest version of the package. + + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + return ( + (pub, stem, entry["version"]) + for pub, stem, entry in self.__iter_entries( + last=last, ordered=ordered, pubs=pubs + ) + ) + + def tuple_entries(self, cb=None, last=False, ordered=False, pubs=EmptyI): + """A generator function that produces tuples of the form ((pub, + stem, version), entry) as it iterates over the contents of the + catalog part (where entry is the related catalog entry for the + fmri). Callers should not modify any of the data that is + returned. + + 'cb' is an optional callback function that will be executed for + each package. It must accept two arguments: 'pkg' and 'entry'. + 'pkg' is an FMRI tuple and 'entry' is the dictionary structure + of the catalog entry for the package. If the callback returns + False, then the entry will not be included in the results. + + 'last' is a boolean value that indicates only the last entry + for each package on a per-publisher basis should be returned. + As long as the CatalogPart has been saved since the last + modifying operation, or sort() has has been called, this will + also be the newest version of the package. + + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to. + + Results are always in catalog version order on a per-publisher, + per-stem basis.""" + + for pub, stem, entry in self.__iter_entries( + last=last, ordered=ordered, pubs=pubs + ): + t = (pub, stem, entry["version"]) + if cb is None or cb(t, entry): + yield t, entry + + def validate(self, signatures=None, require_signatures=False): + """Verifies whether the signatures for the contents of the + CatalogPart match the specified signature data, or if not + provided, the current signature data. 
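# Standalone illustration (not part of the change) of the key function used by
# sort() above.  The real code builds pkg.version.Version objects for proper
# version comparison; the tuple-of-ints parser below is only a simplified
# stand-in so the snippet runs without the pkg library.
ver_list = [{"version": "0.5.11,5.11-151046"},
            {"version": "0.5.9,5.11-151040"}]

def key_func(entry):
    # Compare on the dotted release component only; a crude substitute for
    # pkg.version.Version ordering.
    release = entry["version"].split(",", 1)[0]
    return tuple(int(p) for p in release.split("."))

ver_list.sort(key=key_func)
# ver_list is now in ascending version order: 0.5.9 before 0.5.11.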
Raises the exception + named 'BadCatalogSignatures' on failure.""" + + if not self.signatures and not signatures and not require_signatures: + # Nothing to validate, and we're not required to. + return + + # Ensure content is loaded before attempting to retrieve + # or generate signature data. + self.load() + if not signatures: + signatures = self.signatures + + new_signatures = self._gen_signatures(self.__data) + if new_signatures != signatures: + raise api_errors.BadCatalogSignatures(self.pathname) - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - self.load() +class CatalogUpdate(CatalogPartBase): + """A CatalogUpdate object is an augmented representation of a subset + of the package data contained within a Catalog.""" - versions = {} - entries = {} - for pub in self.publishers(pubs=pubs): - ver_list = self.__data[pub].get(name, None) - if not ver_list: - continue + # Properties. + __data = None + last_modified = None - for entry in ver_list: - sver = entry["version"] - pfmri = fmri.PkgFmri(name=name, publisher=pub, - version=sver) + # Operation constants. + ADD = "add" + REMOVE = "remove" - versions[sver] = pfmri.version - entries.setdefault(sver, []) - entries[sver].append(pfmri) + def __init__(self, name, meta_root=None, sign=True): + """Initializes a CatalogUpdate object.""" - for key, ver in sorted(six.iteritems(versions), key=itemgetter(1)): - yield ver, entries[key] + self.__data = {} + if not name.startswith("update."): + raise UnrecognizedCatalogPart(name) + CatalogPartBase.__init__(self, name, meta_root=meta_root, sign=sign) - def get_entry(self, pfmri=None, pub=None, stem=None, ver=None): - """Returns the catalog part entry for the given package FMRI or - FMRI components.""" + def add(self, pfmri, operation, op_time, metadata=None): + """Records the specified catalog operation and any related + catalog metadata for the specified package FMRI. - assert pfmri or (pub and stem and ver) - if pfmri and not pfmri.publisher: - raise api_errors.AnarchicalCatalogFMRI(str(pfmri)) + 'operation' must be one of the following constant values + provided by the CatalogUpdate class: + ADD + REMOVE - # Since this is a hot path, this function checks for loaded - # status before attempting to call the load function. - if not self.loaded: - self.load() + 'op_time' is a UTC datetime object indicating the time + the catalog operation was performed. - if pfmri: - pub, stem, ver = pfmri.tuple() - ver = str(ver) + 'metadata' is an optional dict containing the catalog + metadata that should be stored for the specified FMRI + indexed by catalog part (e.g. "dependency", "summary", + etc.).""" - pkg_list = self.__data.get(pub, None) - if not pkg_list: - return - - ver_list = pkg_list.get(stem, ()) - for entry in ver_list: - if entry["version"] == ver: - return entry - - def get_package_counts(self): - """Returns a tuple of integer values (package_count, - package_version_count). The first is the number of - unique packages (per-publisher), and the second is the - number of unique package versions (per-publisher and - stem).""" - - self.load() - package_count = 0 - package_version_count = 0 - for pub in self.publishers(): - for stem in self.__data[pub]: - package_count += 1 - package_version_count += \ - len(self.__data[pub][stem]) - return (package_count, package_version_count) - - def get_package_counts_by_pub(self, pubs=EmptyI): - """Returns a generator of tuples of the form (pub, - package_count, package_version_count). 
'pub' is the publisher - prefix, 'package_count' is the number of unique packages for the - publisher, and 'package_version_count' is the number of unique - package versions for the publisher. - """ - - self.load() - for pub in self.publishers(pubs=pubs): - package_count = 0 - package_version_count = 0 - for stem in self.__data[pub]: - package_count += 1 - package_version_count += \ - len(self.__data[pub][stem]) - yield pub, package_count, package_version_count - - def load(self): - """Load and transform the catalog part's data, preparing it - for use.""" - - if self.loaded: - # Already loaded, or only in-memory. - return - self.__data = CatalogPartBase.load(self) - - def names(self, pubs=EmptyI): - """Returns a set containing the names of all the packages in - the CatalogPart. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - self.load() - return set(( - stem - for pub in self.publishers(pubs=pubs) - for stem in self.__data[pub] - )) - - def pkg_names(self, pubs=EmptyI): - """A generator function that produces package tuples of the form - (pub, stem) as it iterates over the contents of the CatalogPart. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to. If specified, publishers will be sorted in - the order given. - - Results are always returned sorted by stem and then by - publisher.""" - - self.load() - - # Results have to be sorted by stem first, and by - # publisher prefix second. - pkg_list = [ - "{0}!{1}".format(stem, pub) - for pub in self.publishers(pubs=pubs) - for stem in self.__data[pub] - ] - - pub_sort = None - if pubs: - pos = dict((p, i) for (i, p) in enumerate(pubs)) - def pub_key(a): - astem, apub = a.split("!", 1) - return (astem, pos[apub]) - pub_sort = pub_key - - for entry in sorted(pkg_list, key=pub_sort): - stem, pub = entry.split("!", 1) - yield pub, stem - - def publishers(self, pubs=EmptyI): - """A generator function that returns publisher prefixes as it - iterates over the package data in the CatalogPart. - - 'pubs' is an optional list that contains the prefixes of the - publishers to restrict the results to.""" - - self.load() - for pub in self.__data: - # Any entries starting with "_" are part of the - # reserved catalog namespace. - if not pub[0] == "_" and (not pubs or pub in pubs): - yield pub - - def remove(self, pfmri, op_time=None): - """Remove a package and its metadata.""" - - if not pfmri.publisher: - raise api_errors.AnarchicalCatalogFMRI(pfmri.get_fmri()) - - self.load() - pkg_list = self.__data.get(pfmri.publisher, None) - if not pkg_list: - raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + if not pfmri.publisher: + raise api_errors.AnarchicalCatalogFMRI(pfmri.get_fmri()) - ver = str(pfmri.version) - ver_list = pkg_list.get(pfmri.pkg_name, []) - for i, entry in enumerate(ver_list): - if entry["version"] == ver: - # Safe to do this since a 'break' is done - # immediately after removals are performed. - del ver_list[i] - if not ver_list: - # When all version entries for a - # package are removed, its stem - # should be also. - del pkg_list[pfmri.pkg_name] - if not pkg_list: - # When all package stems for a - # publisher have been removed, - # it should be also. 
- del self.__data[pfmri.publisher] - break - else: - raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + if operation not in (self.ADD, self.REMOVE): + raise api_errors.UnknownUpdateType(operation) - if not op_time: - op_time = datetime.datetime.utcnow() - self.last_modified = op_time - self.signatures = {} - - def save(self): - """Transform and store the catalog part's data in a file using - the pathname /.""" - - if not self.meta_root: - # Assume this is in-memory only. - return - - # Ensure content is loaded before attempting save. - self.load() - - if len(self.features): - self.__data['_FEATURE'] = self.features - CatalogPartBase.save(self, self.__data) - - def sort(self, pfmris=None, pubs=None): - """Re-sorts the contents of the CatalogPart such that version - entries for each package stem are in ascending order. - - 'pfmris' is an optional set of FMRIs to restrict the sort to. - This is useful during catalog operations as only entries for - the corresponding package stem(s) need to be sorted. - - 'pubs' is an optional set of publisher prefixes to restrict - the sort to. This is useful during catalog operations as only - entries for the corresponding publisher stem(s) need to be - sorted. This option has no effect if 'pfmris' is also - provided. - - If neither 'pfmris' or 'pubs' is provided, all entries will be - sorted.""" - - def key_func(item): - return pkg.version.Version(item["version"]) - - self.load() - if pfmris is not None: - processed = set() - for f in pfmris: - pkg_stem = f.get_pkg_stem() - if pkg_stem in processed: - continue - processed.add(pkg_stem) - - # The specified FMRI may not exist in this - # CatalogPart, so continue if it does not - # exist. - pkg_list = self.__data.get(f.publisher, None) - if pkg_list: - ver_list = pkg_list.get(f.pkg_name, - None) - if ver_list: - ver_list.sort(key=key_func) - return - - for pub in self.publishers(pubs=pubs): - for stem in self.__data[pub]: - self.__data[pub][stem].sort(key=key_func) - - def tuples(self, last=False, ordered=False, pubs=EmptyI): - """A generator function that produces FMRI tuples as it - iterates over the contents of the catalog part. - - 'last' is a boolean value that indicates only the last FMRI - tuple for each package on a per-publisher basis should be - returned. As long as the CatalogPart has been saved since - the last modifying operation, or sort() has has been called, - this will also be the newest version of the package. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - return ( - (pub, stem, entry["version"]) - for pub, stem, entry in self.__iter_entries(last=last, - ordered=ordered, pubs=pubs) - ) + self.load() + self.__data.setdefault(pfmri.publisher, {}) + pkg_list = self.__data[pfmri.publisher] - def tuple_entries(self, cb=None, last=False, ordered=False, pubs=EmptyI): - """A generator function that produces tuples of the form ((pub, - stem, version), entry) as it iterates over the contents of the - catalog part (where entry is the related catalog entry for the - fmri). Callers should not modify any of the data that is - returned. - - 'cb' is an optional callback function that will be executed for - each package. It must accept two arguments: 'pkg' and 'entry'. 
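# Standalone illustration (not part of the change) of the cascading removal
# performed by remove() above: when the last version entry for a stem is
# deleted the stem is dropped, and when the last stem for a publisher is
# deleted the publisher is dropped too.  The data below is made up.
data = {"omnios": {"editor/vim": [{"version": "9.0-151046"}]}}

def remove_version(data, pub, stem, ver):
    data[pub][stem] = [e for e in data[pub][stem] if e["version"] != ver]
    if not data[pub][stem]:
        del data[pub][stem]          # last version for the stem
        if not data[pub]:
            del data[pub]            # last stem for the publisher

remove_version(data, "omnios", "editor/vim", "9.0-151046")
assert data == {}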
- 'pkg' is an FMRI tuple and 'entry' is the dictionary structure - of the catalog entry for the package. If the callback returns - False, then the entry will not be included in the results. - - 'last' is a boolean value that indicates only the last entry - for each package on a per-publisher basis should be returned. - As long as the CatalogPart has been saved since the last - modifying operation, or sort() has has been called, this will - also be the newest version of the package. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to. - - Results are always in catalog version order on a per-publisher, - per-stem basis.""" - - for pub, stem, entry in self.__iter_entries(last=last, - ordered=ordered, pubs=pubs): - t = (pub, stem, entry["version"]) - if cb is None or cb(t, entry): - yield t, entry - - def validate(self, signatures=None, require_signatures=False): - """Verifies whether the signatures for the contents of the - CatalogPart match the specified signature data, or if not - provided, the current signature data. Raises the exception - named 'BadCatalogSignatures' on failure.""" - - if not self.signatures and not signatures and \ - not require_signatures: - # Nothing to validate, and we're not required to. - return - - # Ensure content is loaded before attempting to retrieve - # or generate signature data. - self.load() - if not signatures: - signatures = self.signatures - - new_signatures = self._gen_signatures(self.__data) - if new_signatures != signatures: - raise api_errors.BadCatalogSignatures(self.pathname) + pkg_list.setdefault(pfmri.pkg_name, []) + ver_list = pkg_list[pfmri.pkg_name] + if metadata is not None: + entry = metadata + else: + entry = {} + entry["op-time"] = datetime_to_basic_ts(op_time) + entry["op-type"] = operation + entry["version"] = str(pfmri.version) + ver_list.append(entry) + + # To ensure the update log is viewed as having been updated + # at the exact same time as the catalog, the last_modified + # time of the update log must match the operation time. + self.last_modified = op_time + self.signatures = {} + + def load(self): + """Load and transform the catalog update's data, preparing it + for use.""" + + if self.loaded: + # Already loaded, or only in-memory. + return + self.__data = CatalogPartBase.load(self) + + def publishers(self): + """A generator function that returns publisher prefixes as it + iterates over the package data in the CatalogUpdate.""" + + self.load() + for pub in self.__data: + # Any entries starting with "_" are part of the + # reserved catalog namespace. + if not pub[0] == "_": + yield pub + + def save(self): + """Transform and store the catalog update's data in a file using + the pathname /.""" + + if not self.meta_root: + # Assume this is in-memory only. + return + + # Ensure content is loaded before attempting save. + self.load() + + if len(self.features): + self.__data["_FEATURE"] = self.features + CatalogPartBase.save(self, self.__data) + + def updates(self): + """A generator function that produces tuples of the format + (fmri, op_type, op_time, metadata). Where: + + * 'fmri' is a PkgFmri object for the package. + + * 'op_type' is a CatalogUpdate constant indicating + the catalog operation performed. 
+ + * 'op_time' is a UTC datetime object representing the + time time the catalog operation was performed. + + * 'metadata' is a dict containing the catalog metadata + for the FMRI indexed by catalog part name. + + Results are always in ascending operation time order on a + per-publisher, per-stem basis. + """ + + self.load() + + def get_update(pub, stem, entry): + mdata = {} + for key in entry: + if key.startswith("catalog."): + mdata[key] = entry[key] + op_time = basic_ts_to_datetime(entry["op-time"]) + pfmri = fmri.PkgFmri( + name=stem, publisher=pub, version=entry["version"] + ) + return (pfmri, entry["op-type"], op_time, mdata) + + for pub in self.publishers(): + for stem in self.__data[pub]: + for entry in self.__data[pub][stem]: + yield get_update(pub, stem, entry) + return + + def validate(self, signatures=None, require_signatures=False): + """Verifies whether the signatures for the contents of the + CatalogUpdate match the specified signature data, or if not + provided, the current signature data. Raises the exception + named 'BadCatalogSignatures' on failure.""" + + if not self.signatures and not signatures and not require_signatures: + # Nothing to validate, and we're not required to. + return + + # Ensure content is loaded before attempting to retrieve + # or generate signature data. + self.load() + if not signatures: + signatures = self.signatures + + new_signatures = self._gen_signatures(self.__data) + if new_signatures != signatures: + raise api_errors.BadCatalogSignatures(self.pathname) -class CatalogUpdate(CatalogPartBase): - """A CatalogUpdate object is an augmented representation of a subset - of the package data contained within a Catalog.""" - # Properties. - __data = None - last_modified = None +class CatalogAttrs(CatalogPartBase): + """A CatalogAttrs object is the representation of the attributes of a + Catalog object.""" + + # Properties. + __data = None + + # This structure defines defaults (for use in __init__) as well as + # the set of required elements for this catalog part. See also the + # logic in load(). + __DEFAULT_ELEMS = { + "created": None, + "last-modified": None, + "package-count": 0, + "package-version-count": 0, + "parts": {}, + "updates": {}, + "version": 1, + } + + def __init__(self, meta_root=None, sign=True): + """Initializes a CatalogAttrs object.""" + + self.__data = {} + CatalogPartBase.__init__( + self, name="catalog.attrs", meta_root=meta_root, sign=sign + ) + + if self.loaded: + # If the data is already seen as 'loaded' during init, + # this is actually a new object, so setup some sane + # defaults. + created = self.__data["last-modified"] + self.__data = copy.deepcopy(self.__DEFAULT_ELEMS) + self.__data["created"] = created + self.__data["last-modified"] = created + else: + # Assume that the attributes of the catalog can be + # obtained from a file. 
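# Illustrative sketch (not part of the change) of how an update log is written
# and replayed with the add() and updates() methods above.  The update-log
# name, FMRI and metadata are hypothetical, the object is used purely
# in-memory, and the pkg library must be importable.
import datetime
import pkg.catalog as catalog
import pkg.fmri as fmri

ulog = catalog.CatalogUpdate("update.20240101T00Z.C")
pf = fmri.PkgFmri("pkg://omnios/editor/vim@9.0-151046")
op_time = datetime.datetime.utcnow()

# Record an addition; metadata is keyed by catalog part name.
ulog.add(pf, catalog.CatalogUpdate.ADD, op_time,
         metadata={"catalog.base.C": {"actions": []}})

# Replay the log; results come back in operation-time order.
for upf, op_type, uop_time, mdata in ulog.updates():
    print(op_type, upf.get_fmri(), uop_time)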
+ self.load() + + def __get_created(self): + return self.__data["created"] + + def __get_last_modified(self): + return self.__data["last-modified"] + + def __get_package_count(self): + return self.__data["package-count"] + + def __get_package_version_count(self): + return self.__data["package-version-count"] + + def __get_parts(self): + return self.__data["parts"] + + def __get_updates(self): + return self.__data["updates"] + + def __get_version(self): + return self.__data["version"] + + def __set_created(self, value): + self.__data["created"] = value + self.signatures = {} + + def __set_last_modified(self, value): + self.__data["last-modified"] = value + self.signatures = {} + + def __set_package_count(self, value): + self.__data["package-count"] = value + self.signatures = {} + + def __set_package_version_count(self, value): + self.__data["package-version-count"] = value + self.signatures = {} + + def __set_parts(self, value): + self.__data["parts"] = value + self.signatures = {} + + def __set_updates(self, value): + self.__data["updates"] = value + self.signatures = {} + + def __set_version(self, value): + self.__data["version"] = value + self.signatures = {} + + def __transform(self): + """Duplicate and transform 'self.__data' for saving.""" + + # Use a copy to prevent the in-memory version from being + # affected by the transformations. + struct = copy.deepcopy(self.__data) + for key, val in six.iteritems(struct): + if isinstance(val, datetime.datetime): + # Convert datetime objects to an ISO-8601 + # basic format string. + struct[key] = datetime_to_basic_ts(val) + continue + + if key in ("parts", "updates"): + for e in val: + lm = val[e].get("last-modified", None) + if lm: + lm = datetime_to_basic_ts(lm) + val[e]["last-modified"] = lm + return struct + + def load(self): + """Load and transform the catalog attribute data.""" + + if self.loaded: + # Already loaded, or only in-memory. + return + location = os.path.join(self.meta_root, self.name) + + struct = CatalogPartBase.load(self) + # Check to see that struct is as we expect: it must be a dict + # and have all of the elements in self.__DEFAULT_ELEMS. + if type(struct) != dict or not ( + set(self.__DEFAULT_ELEMS.keys()) <= set(struct.keys()) + ): + raise api_errors.InvalidCatalogFile(location) + + def cat_ts_to_datetime(val): + try: + return basic_ts_to_datetime(val) + except ValueError: + raise api_errors.InvalidCatalogFile(location) + + for key, val in six.iteritems(struct): + if key in ("created", "last-modified"): + # Convert ISO-8601 basic format strings to + # datetime objects. These dates can be + # 'null' due to v0 catalog transformations. + if val: + struct[key] = cat_ts_to_datetime(val) + continue + + if key in ("parts", "updates"): + if type(val) != dict: + raise api_errors.InvalidCatalogFile(location) + + # 'parts' and 'updates' have a more complex + # structure. Check that all of the subparts + # look sane. + for subpart in val: + if subpart != os.path.basename(subpart): + raise api_errors.UnrecognizedCatalogPart( + "{0} {{{1}: {2}}}".format(self.name, key, subpart) + ) - # Operation constants. - ADD = "add" - REMOVE = "remove" + # Build datetimes from timestamps. 
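# Standalone sketch (not part of the change) of the property pattern used by
# CatalogAttrs above: every setter both stores the value and clears the cached
# signature data, so any mutation forces signatures to be regenerated on the
# next save or validate.  The class below is a minimal stand-in, not pkg code.
class AttrsSketch:
    def __init__(self):
        self.__data = {"package-count": 0}
        self.signatures = {"sha-1": "stale"}

    def __get_package_count(self):
        return self.__data["package-count"]

    def __set_package_count(self, value):
        self.__data["package-count"] = value
        self.signatures = {}        # mutation invalidates signatures

    package_count = property(__get_package_count, __set_package_count)

attrs = AttrsSketch()
attrs.package_count = 42
assert attrs.package_count == 42 and attrs.signatures == {}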
+ for e in val: + lm = val[e].get("last-modified", None) + if lm: + lm = cat_ts_to_datetime(lm) + val[e]["last-modified"] = lm - def __init__(self, name, meta_root=None, sign=True): - """Initializes a CatalogUpdate object.""" + self.__data = struct - self.__data = {} - if not name.startswith("update."): - raise UnrecognizedCatalogPart(name) - CatalogPartBase.__init__(self, name, meta_root=meta_root, - sign=sign) + def save(self): + """Transform and store the catalog attribute data in a file + using the pathname /.""" - def add(self, pfmri, operation, op_time, metadata=None): - """Records the specified catalog operation and any related - catalog metadata for the specified package FMRI. + if not self.meta_root: + # Assume this is in-memory only. + return - 'operation' must be one of the following constant values - provided by the CatalogUpdate class: - ADD - REMOVE + # Ensure content is loaded before attempting save. + self.load() - 'op_time' is a UTC datetime object indicating the time - the catalog operation was performed. + if len(self.features): + self.__data["_FEATURE"] = self.features + CatalogPartBase.save(self, self.__transform()) - 'metadata' is an optional dict containing the catalog - metadata that should be stored for the specified FMRI - indexed by catalog part (e.g. "dependency", "summary", - etc.).""" + def validate(self, signatures=None, require_signatures=False): + """Verifies whether the signatures for the contents of the + CatalogAttrs match the specified signature data, or if not + provided, the current signature data. Raises the exception + named 'BadCatalogSignatures' on failure.""" - if not pfmri.publisher: - raise api_errors.AnarchicalCatalogFMRI(pfmri.get_fmri()) + if not self.signatures and not signatures and not require_signatures: + # Nothing to validate, and we're not required to. + return - if operation not in (self.ADD, self.REMOVE): - raise api_errors.UnknownUpdateType(operation) + # Ensure content is loaded before attempting to retrieve + # or generate signature data. + self.load() + if not signatures: + signatures = self.signatures - self.load() - self.__data.setdefault(pfmri.publisher, {}) - pkg_list = self.__data[pfmri.publisher] + new_signatures = self._gen_signatures(self.__transform()) + if new_signatures != signatures: + raise api_errors.BadCatalogSignatures(self.pathname) - pkg_list.setdefault(pfmri.pkg_name, []) - ver_list = pkg_list[pfmri.pkg_name] + created = property(__get_created, __set_created) - if metadata is not None: - entry = metadata - else: - entry = {} - entry["op-time"] = datetime_to_basic_ts(op_time) - entry["op-type"] = operation - entry["version"] = str(pfmri.version) - ver_list.append(entry) - - # To ensure the update log is viewed as having been updated - # at the exact same time as the catalog, the last_modified - # time of the update log must match the operation time. - self.last_modified = op_time - self.signatures = {} - - def load(self): - """Load and transform the catalog update's data, preparing it - for use.""" - - if self.loaded: - # Already loaded, or only in-memory. - return - self.__data = CatalogPartBase.load(self) - - def publishers(self): - """A generator function that returns publisher prefixes as it - iterates over the package data in the CatalogUpdate.""" - - self.load() - for pub in self.__data: - # Any entries starting with "_" are part of the - # reserved catalog namespace. 
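# Standalone sketch (not part of the change) of the validate() pattern used
# above: signatures are regenerated from the current (transformed) data and
# compared against the stored ones; a mismatch means the content no longer
# matches what was signed.  hashlib stands in for _gen_signatures().
import hashlib
import json

def gen_signatures(data):
    payload = json.dumps(data, sort_keys=True).encode("utf-8")
    return {"sha-1": hashlib.sha1(payload).hexdigest()}

data = {"version": 1, "package-count": 3}
stored = gen_signatures(data)

data["package-count"] = 4       # simulate tampering / stale signature data
if gen_signatures(data) != stored:
    print("signatures do not match - validate() would raise BadCatalogSignatures")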
- if not pub[0] == "_": - yield pub - - def save(self): - """Transform and store the catalog update's data in a file using - the pathname /.""" - - if not self.meta_root: - # Assume this is in-memory only. - return - - # Ensure content is loaded before attempting save. - self.load() - - if len(self.features): - self.__data['_FEATURE'] = self.features - CatalogPartBase.save(self, self.__data) - - def updates(self): - """A generator function that produces tuples of the format - (fmri, op_type, op_time, metadata). Where: - - * 'fmri' is a PkgFmri object for the package. - - * 'op_type' is a CatalogUpdate constant indicating - the catalog operation performed. - - * 'op_time' is a UTC datetime object representing the - time time the catalog operation was performed. - - * 'metadata' is a dict containing the catalog metadata - for the FMRI indexed by catalog part name. - - Results are always in ascending operation time order on a - per-publisher, per-stem basis. - """ - - self.load() - - def get_update(pub, stem, entry): - mdata = {} - for key in entry: - if key.startswith("catalog."): - mdata[key] = entry[key] - op_time = basic_ts_to_datetime(entry["op-time"]) - pfmri = fmri.PkgFmri(name=stem, publisher=pub, - version=entry["version"]) - return (pfmri, entry["op-type"], op_time, mdata) - - for pub in self.publishers(): - for stem in self.__data[pub]: - for entry in self.__data[pub][stem]: - yield get_update(pub, stem, entry) - return + last_modified = property(__get_last_modified, __set_last_modified) - def validate(self, signatures=None, require_signatures=False): - """Verifies whether the signatures for the contents of the - CatalogUpdate match the specified signature data, or if not - provided, the current signature data. Raises the exception - named 'BadCatalogSignatures' on failure.""" + package_count = property(__get_package_count, __set_package_count) - if not self.signatures and not signatures and \ - not require_signatures: - # Nothing to validate, and we're not required to. - return + package_version_count = property( + __get_package_version_count, __set_package_version_count + ) - # Ensure content is loaded before attempting to retrieve - # or generate signature data. - self.load() - if not signatures: - signatures = self.signatures + parts = property(__get_parts, __set_parts) - new_signatures = self._gen_signatures(self.__data) - if new_signatures != signatures: - raise api_errors.BadCatalogSignatures(self.pathname) + updates = property(__get_updates, __set_updates) + version = property(__get_version, __set_version) -class CatalogAttrs(CatalogPartBase): - """A CatalogAttrs object is the representation of the attributes of a - Catalog object.""" - - # Properties. - __data = None - - # This structure defines defaults (for use in __init__) as well as - # the set of required elements for this catalog part. See also the - # logic in load(). - __DEFAULT_ELEMS = { - "created": None, - "last-modified": None, - "package-count": 0, - "package-version-count": 0, - "parts": {}, - "updates": {}, - "version": 1, - } - def __init__(self, meta_root=None, sign=True): - """Initializes a CatalogAttrs object.""" - - self.__data = {} - CatalogPartBase.__init__(self, name="catalog.attrs", - meta_root=meta_root, sign=sign) - - if self.loaded: - # If the data is already seen as 'loaded' during init, - # this is actually a new object, so setup some sane - # defaults. 
- created = self.__data["last-modified"] - self.__data = copy.deepcopy(self.__DEFAULT_ELEMS) - self.__data["created"] = created - self.__data["last-modified"] = created +class Catalog(object): + """A Catalog is the representation of the package FMRIs available from + a package repository.""" + + __BASE_PART = "catalog.base.C" + __DEPS_PART = "catalog.dependency.C" + __SUMM_PART_PFX = "catalog.summary" + + # The file mode to be used for all catalog files. + __file_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH + + # These properties are declared here so that they show up in the pydoc + # documentation as private, and for clarity in the property declarations + # found near the end of the class definition. + _attrs = None + __batch_mode = None + __lock = None + __manifest_cb = None + __meta_root = None + __sign = None + + # These are used to cache or store CatalogPart and CatalogUpdate objects + # as they are used. It should not be confused with the CatalogPart + # names and CatalogUpdate names stored in the CatalogAttrs object. + __parts = None + __updates = None + + # Class Constants + DEPENDENCY, SUMMARY = range(2) + + def __init__( + self, + batch_mode=False, + meta_root=None, + log_updates=False, + manifest_cb=None, + read_only=False, + sign=True, + ): + """Initializes a Catalog object. + + 'batch_mode' is an optional boolean value that indicates that + the caller intends to perform multiple modifying operations on + catalog before saving. This is useful for performance reasons + as the contents of the catalog will not be sorted after each + change, and the package counts will not be updated (except at + save()). By default this value is False. If this value is + True, callers are responsible for calling finalize() to ensure + that catalog entries are in the correct order and package counts + accurately reflect the catalog contents. + + 'meta_root' is an optional absolute pathname of a directory + that catalog metadata can be written to and read from, and + must already exist. If no path is supplied, then it is + assumed that the catalog object will be used for in-memory + operations only. + + 'log_updates' is an optional boolean value indicating whether + updates to the catalog should be logged. This enables consumers + of the catalog to perform incremental updates. + + 'manifest_cb' is an optional callback used by actions() and + get_entry_actions() to lazy-load Manifest Actions if the catalog + does not have the actions data for a requested package entry. + + 'read_only' is an optional boolean value that indicates if + operations that modify the catalog are allowed (an assertion + error will be raised if one is attempted and this is True). + + 'sign' is an optional boolean value that indicates that the + the catalog data should have signature data generated and + embedded when serialized. This option is primarily a matter + of convenience for callers that wish to trade integrity checks + for improved catalog serialization performance.""" + + self.__batch_mode = batch_mode + self.__manifest_cb = manifest_cb + self.__parts = {} + self.__updates = {} + + # Must be set after the above. + self.log_updates = log_updates + self.meta_root = meta_root + self.read_only = read_only + self.sign = sign + + # Must be set after the above. + self._attrs = CatalogAttrs(meta_root=self.meta_root, sign=sign) + + # This lock is used to protect the catalog file from multiple + # threads writing to it at the same time. + self.__lock = threading.Lock() + + # Must be done last. 
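# Illustrative sketch (not part of the change) of the batch_mode contract
# described in the __init__ docstring above: with batch_mode=True the catalog
# does not re-sort or recount after every change, so the caller must call
# finalize() before save().  The meta_root path and FMRIs are hypothetical,
# the directory must already exist, and the pkg library must be importable.
import pkg.catalog as catalog
import pkg.fmri as fmri

cat = catalog.Catalog(batch_mode=True, meta_root="/var/tmp/cat-example")
for s in ("pkg://omnios/editor/vim@9.0-151046",
          "pkg://omnios/web/server@2.4-151046"):
    cat.add_package(fmri.PkgFmri(s))

cat.finalize()   # sort entries and recompute package counts
cat.save()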
+ self.__set_perms() + + def __actions( + self, + info_needed, + excludes=EmptyI, + cb=None, + locales=None, + last_version=False, + ordered=False, + pubs=EmptyI, + ): + assert info_needed + if not locales: + locales = set(("C",)) + else: + locales = set(locales) + + for f, entry in self.__entries( + cb=cb, + info_needed=info_needed, + locales=locales, + last_version=last_version, + ordered=ordered, + pubs=pubs, + ): + try: + yield f, self.__gen_actions(f, entry["actions"], excludes) + except KeyError: + if self.__manifest_cb: + yield f, self.__gen_lazy_actions( + f, info_needed, locales, excludes + ) else: - # Assume that the attributes of the catalog can be - # obtained from a file. - self.load() - - def __get_created(self): - return self.__data["created"] - - def __get_last_modified(self): - return self.__data["last-modified"] - - def __get_package_count(self): - return self.__data["package-count"] - - def __get_package_version_count(self): - return self.__data["package-version-count"] - - def __get_parts(self): - return self.__data["parts"] - - def __get_updates(self): - return self.__data["updates"] - - def __get_version(self): - return self.__data["version"] - - def __set_created(self, value): - self.__data["created"] = value - self.signatures = {} - - def __set_last_modified(self, value): - self.__data["last-modified"] = value - self.signatures = {} - - def __set_package_count(self, value): - self.__data["package-count"] = value - self.signatures = {} - - def __set_package_version_count(self, value): - self.__data["package-version-count"] = value - self.signatures = {} - - def __set_parts(self, value): - self.__data["parts"] = value - self.signatures = {} - - def __set_updates(self, value): - self.__data["updates"] = value - self.signatures = {} - - def __set_version(self, value): - self.__data["version"] = value - self.signatures = {} - - def __transform(self): - """Duplicate and transform 'self.__data' for saving.""" - - # Use a copy to prevent the in-memory version from being - # affected by the transformations. - struct = copy.deepcopy(self.__data) - for key, val in six.iteritems(struct): - if isinstance(val, datetime.datetime): - # Convert datetime objects to an ISO-8601 - # basic format string. - struct[key] = datetime_to_basic_ts(val) - continue - - if key in ("parts", "updates"): - for e in val: - lm = val[e].get("last-modified", None) - if lm: - lm = datetime_to_basic_ts(lm) - val[e]["last-modified"] = lm - return struct - - def load(self): - """Load and transform the catalog attribute data.""" - - if self.loaded: - # Already loaded, or only in-memory. - return - location = os.path.join(self.meta_root, self.name) - - struct = CatalogPartBase.load(self) - # Check to see that struct is as we expect: it must be a dict - # and have all of the elements in self.__DEFAULT_ELEMS. - if type(struct) != dict or \ - not (set(self.__DEFAULT_ELEMS.keys()) <= \ - set(struct.keys())): - raise api_errors.InvalidCatalogFile(location) - - def cat_ts_to_datetime(val): - try: - return basic_ts_to_datetime(val) - except ValueError: - raise api_errors.InvalidCatalogFile(location) - - for key, val in six.iteritems(struct): - if key in ("created", "last-modified"): - # Convert ISO-8601 basic format strings to - # datetime objects. These dates can be - # 'null' due to v0 catalog transformations. 
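# Standalone sketch (not part of the change) of the fallback pattern used by
# __actions() above: cached action strings are preferred, and a manifest
# callback is only consulted when the catalog entry has no "actions" key.
# The entries and callback below are made up.
def actions_for(entries, manifest_cb=None):
    for name, entry in entries:
        try:
            yield name, entry["actions"]          # fast path: cached in catalog
        except KeyError:
            if manifest_cb:
                yield name, manifest_cb(name)     # lazy-load from the manifest
            else:
                yield name, []                    # no action data available

entries = [("editor/vim", {"actions": ["set name=pkg.summary value=Vim"]}),
           ("web/server", {})]
print(list(actions_for(entries,
                       manifest_cb=lambda n: ["set name=pkg.summary value=stub"])))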
- if val: - struct[key] = cat_ts_to_datetime(val) - continue - - if key in ("parts", "updates"): - if type(val) != dict: - raise api_errors.InvalidCatalogFile( - location) - - # 'parts' and 'updates' have a more complex - # structure. Check that all of the subparts - # look sane. - for subpart in val: - if subpart != os.path.basename(subpart): - raise api_errors.\ - UnrecognizedCatalogPart( - "{0} {{{1}: {2}}}".format( - self.name, key, subpart)) - - # Build datetimes from timestamps. - for e in val: - lm = val[e].get("last-modified", None) - if lm: - lm = cat_ts_to_datetime(lm) - val[e]["last-modified"] = lm - - self.__data = struct - - def save(self): - """Transform and store the catalog attribute data in a file - using the pathname /.""" - - if not self.meta_root: - # Assume this is in-memory only. - return - - # Ensure content is loaded before attempting save. - self.load() - - if len(self.features): - self.__data['_FEATURE'] = self.features - CatalogPartBase.save(self, self.__transform()) - - def validate(self, signatures=None, require_signatures=False): - """Verifies whether the signatures for the contents of the - CatalogAttrs match the specified signature data, or if not - provided, the current signature data. Raises the exception - named 'BadCatalogSignatures' on failure.""" - - if not self.signatures and not signatures and \ - not require_signatures: - # Nothing to validate, and we're not required to. - return - - # Ensure content is loaded before attempting to retrieve - # or generate signature data. - self.load() - if not signatures: - signatures = self.signatures - - new_signatures = self._gen_signatures(self.__transform()) - if new_signatures != signatures: - raise api_errors.BadCatalogSignatures(self.pathname) - - created = property(__get_created, __set_created) - - last_modified = property(__get_last_modified, __set_last_modified) - - package_count = property(__get_package_count, __set_package_count) - - package_version_count = property(__get_package_version_count, - __set_package_version_count) - - parts = property(__get_parts, __set_parts) - - updates = property(__get_updates, __set_updates) - - version = property(__get_version, __set_version) + yield f, EmptyI + + def __append(self, src, cb=None, pfmri=None, pubs=EmptyI): + """Private version; caller responsible for locking.""" + + base = self.get_part(self.__BASE_PART) + src_base = src.get_part(self.__BASE_PART, must_exist=True) + if src_base is None: + if pfmri: + raise api_errors.UnknownCatalogEntry(pfmri) + # Nothing to do + return + + # Use the same operation time and date for all operations so + # that the last modification times will be synchronized. This + # also has the benefit of avoiding extra datetime object + # instantiations. + op_time = datetime.datetime.utcnow() + + # For each entry in the 'src' catalog, add its BASE entry to the + # current catalog along and then add it to the 'd'iscard dict if + # 'cb' is defined and returns False. 
+ if pfmri: + entry = src_base.get_entry(pfmri) + if entry is None: + raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + entries = [(pfmri, entry)] + else: + entries = src_base.entries() + + d = {} + for f, entry in entries: + if pubs and f.publisher not in pubs: + continue + + nentry = copy.deepcopy(entry) + if cb is not None: + merge, mdata = cb(src, f, entry) + if not merge: + pub = d.setdefault(f.publisher, {}) + plist = pub.setdefault(f.pkg_name, set()) + plist.add(f.version) + continue + + if mdata: + if "metadata" in nentry: + nentry["metadata"].update(mdata) + else: + nentry["metadata"] = mdata + base.add(f, metadata=nentry, op_time=op_time) + + if d and pfmri: + # If the 'd'iscards dict is populated and pfmri is + # defined, then there is nothing more to do. + return + + # Finally, merge any catalog part entries that exist unless the + # FMRI is found in the 'd'iscard dict. + for name in src.parts.keys(): + if name == self.__BASE_PART: + continue + + part = src.get_part(name, must_exist=True) + if part is None: + # Part doesn't exist in-memory or on-disk, so + # skip it. + continue + + if pfmri: + entry = part.get_entry(pfmri) + if entry is None: + # Package isn't in this part; skip it. + continue + entries = [(pfmri, entry)] + else: + entries = part.entries() + + npart = self.get_part(name) + for f, entry in entries: + if pubs and f.publisher not in pubs: + continue + if ( + f.publisher in d + and f.pkg_name in d[f.publisher] + and f.version in d[f.publisher][f.pkg_name] + ): + # Skip this package. + continue + + nentry = copy.deepcopy(entry) + npart.add(f, metadata=nentry, op_time=op_time) + + def __entries( + self, + cb=None, + info_needed=EmptyI, + last_version=False, + locales=None, + ordered=False, + pubs=EmptyI, + tuples=False, + ): + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + # Catalog contains nothing. + return + + if not locales: + locales = set(("C",)) + else: + locales = set(locales) + + parts = [] + if self.DEPENDENCY in info_needed: + part = self.get_part(self.__DEPS_PART, must_exist=True) + if part is not None: + parts.append(part) + + if self.SUMMARY in info_needed: + for locale in locales: + part = self.get_part( + "{0}.{1}".format(self.__SUMM_PART_PFX, locale), + must_exist=True, + ) + if part is None: + # Data not available for this + # locale. + continue + parts.append(part) + + def merge_entry(src, dest): + for k, v in six.iteritems(src): + if k == "actions": + dest.setdefault(k, []) + dest[k] += v + elif k != "version": + dest[k] = v + + if tuples: + for r, bentry in base.tuple_entries( + cb=cb, last=last_version, ordered=ordered, pubs=pubs + ): + pub, stem, ver = r + mdata = {} + merge_entry(bentry, mdata) + for part in parts: + entry = part.get_entry(pub=pub, stem=stem, ver=ver) + if entry is None: + # Part doesn't have this FMRI, + # so skip it. + continue + for k, v in six.iteritems(entry): + if k == "actions": + mdata.setdefault(k, []) + mdata[k] += v + elif k != "version": + mdata[k] = v + yield r, mdata + return + + for f, bentry in base.entries( + cb=cb, last=last_version, ordered=ordered, pubs=pubs + ): + mdata = {} + merge_entry(bentry, mdata) + for part in parts: + entry = part.get_entry(f) + if entry is None: + # Part doesn't have this FMRI, + # so skip it. 
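# Standalone sketch (not part of the change) of the merge performed by
# __entries() above: the base entry is copied first, then each additional
# catalog part contributes its data, with "actions" lists concatenated and
# the "version" key skipped since the version is already carried by the FMRI.
# The entries below are made up.
def merge_entry(src, dest):
    for k, v in src.items():
        if k == "actions":
            dest.setdefault(k, [])
            dest[k] += v
        elif k != "version":
            dest[k] = v

base_entry = {"version": "9.0-151046", "actions": ["dir path=usr"]}
summary_entry = {"version": "9.0-151046",
                 "actions": ["set name=pkg.summary value=Vim"]}

mdata = {}
merge_entry(base_entry, mdata)
merge_entry(summary_entry, mdata)
# mdata["actions"] now holds both action strings; "version" stays out of mdata.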
+ continue + for k, v in six.iteritems(entry): + if k == "actions": + mdata.setdefault(k, []) + mdata[k] += v + elif k != "version": + mdata[k] = v + yield f, mdata + + def __finalize(self, pfmris=None, pubs=None, sort=True): + """Private finalize method; exposes additional controls for + internal callers.""" + + package_count = 0 + package_version_count = 0 + + part = self.get_part(self.__BASE_PART, must_exist=True) + if part is not None: + # If the base Catalog didn't exist (in-memory or on- + # disk) that implies there is nothing to sort and + # there are no packages (since the base catalog part + # must always exist for packages to be present). + package_count, package_version_count = part.get_package_counts() + + if sort: + # Some operations don't need this, such as + # remove... + for part in self.__parts.values(): + part.sort(pfmris=pfmris, pubs=pubs) + self._attrs.package_count = package_count + self._attrs.package_version_count = package_version_count -class Catalog(object): - """A Catalog is the representation of the package FMRIs available from - a package repository.""" - - __BASE_PART = "catalog.base.C" - __DEPS_PART = "catalog.dependency.C" - __SUMM_PART_PFX = "catalog.summary" - - # The file mode to be used for all catalog files. - __file_mode = stat.S_IRUSR|stat.S_IWUSR|stat.S_IRGRP|stat.S_IROTH - - # These properties are declared here so that they show up in the pydoc - # documentation as private, and for clarity in the property declarations - # found near the end of the class definition. - _attrs = None - __batch_mode = None - __lock = None - __manifest_cb = None - __meta_root = None - __sign = None - - # These are used to cache or store CatalogPart and CatalogUpdate objects - # as they are used. It should not be confused with the CatalogPart - # names and CatalogUpdate names stored in the CatalogAttrs object. - __parts = None - __updates = None - - # Class Constants - DEPENDENCY, SUMMARY = range(2) - - def __init__(self, batch_mode=False, meta_root=None, log_updates=False, - manifest_cb=None, read_only=False, sign=True): - """Initializes a Catalog object. - - 'batch_mode' is an optional boolean value that indicates that - the caller intends to perform multiple modifying operations on - catalog before saving. This is useful for performance reasons - as the contents of the catalog will not be sorted after each - change, and the package counts will not be updated (except at - save()). By default this value is False. If this value is - True, callers are responsible for calling finalize() to ensure - that catalog entries are in the correct order and package counts - accurately reflect the catalog contents. - - 'meta_root' is an optional absolute pathname of a directory - that catalog metadata can be written to and read from, and - must already exist. If no path is supplied, then it is - assumed that the catalog object will be used for in-memory - operations only. - - 'log_updates' is an optional boolean value indicating whether - updates to the catalog should be logged. This enables consumers - of the catalog to perform incremental updates. - - 'manifest_cb' is an optional callback used by actions() and - get_entry_actions() to lazy-load Manifest Actions if the catalog - does not have the actions data for a requested package entry. - - 'read_only' is an optional boolean value that indicates if - operations that modify the catalog are allowed (an assertion - error will be raised if one is attempted and this is True). 
- - 'sign' is an optional boolean value that indicates that the - the catalog data should have signature data generated and - embedded when serialized. This option is primarily a matter - of convenience for callers that wish to trade integrity checks - for improved catalog serialization performance.""" - - self.__batch_mode = batch_mode - self.__manifest_cb = manifest_cb - self.__parts = {} - self.__updates = {} - - # Must be set after the above. - self.log_updates = log_updates - self.meta_root = meta_root - self.read_only = read_only - self.sign = sign - - # Must be set after the above. - self._attrs = CatalogAttrs(meta_root=self.meta_root, sign=sign) - - # This lock is used to protect the catalog file from multiple - # threads writing to it at the same time. - self.__lock = threading.Lock() - - # Must be done last. - self.__set_perms() - - def __actions(self, info_needed, excludes=EmptyI, cb=None, locales=None, - last_version=False, ordered=False, pubs=EmptyI): - assert info_needed - if not locales: - locales = set(("C",)) + @staticmethod + def __gen_actions(pfmri, actions, excludes=EmptyI): + errors = None + if not isinstance(pfmri, fmri.PkgFmri): + # pfmri is assumed to be a FMRI tuple. + pub, stem, ver = pfmri + else: + pub = pfmri.publisher + for astr in actions: + try: + a = pkg.actions.fromstr(astr) + except pkg.actions.ActionError as e: + # Accumulate errors and continue so that as + # much of the action data as possible can be + # parsed. + if errors is None: + # Allocate this here to avoid overhead + # of list allocation/deallocation. + errors = [] + if not isinstance(pfmri, fmri.PkgFmri): + pfmri = fmri.PkgFmri(name=stem, publisher=pub, version=ver) + e.fmri = pfmri + errors.append(e) + continue + + if a.name == "set" and ( + a.attrs["name"].startswith("facet") + or a.attrs["name"].startswith("variant") + ): + # Don't filter actual facet or variant + # set actions. + yield a + elif a.include_this(excludes, publisher=pub): + yield a + + if errors is not None: + raise api_errors.InvalidPackageErrors(errors) + + def __gen_lazy_actions( + self, f, info_needed, locales=EmptyI, excludes=EmptyI + ): + # Note that the logic below must be kept in sync with + # group_actions found in add_package. + m = self.__manifest_cb(self, f) + if not m: + # If the manifest callback returns None, then + # assume there is no action data to yield. + return + + if Catalog.DEPENDENCY in info_needed: + atypes = ("depend", "set") + elif Catalog.SUMMARY in info_needed: + atypes = ("set",) + else: + raise RuntimeError( + _("Unknown info_needed " "type: {0}".format(info_needed)) + ) + + for a, attr_name in self.__gen_manifest_actions(m, atypes, excludes): + if ( + a.name == "depend" + or attr_name.startswith("variant") + or attr_name.startswith("facet") + or attr_name.startswith("pkg.depend.") + or attr_name in ("pkg.obsolete", "pkg.renamed") + ): + if Catalog.DEPENDENCY in info_needed: + yield a + elif Catalog.SUMMARY in info_needed and a.name == "set": + if attr_name in ( + "fmri", + "pkg.fmri", + "publisher", + ) or attr_name.startswith( + ("info.source-url", "pkg.debug", "pkg.linted") + ): + continue + + comps = attr_name.split(":") + if len(comps) > 1: + # 'set' is locale-specific. 
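# Standalone sketch (not part of the change) of the error-accumulation pattern
# used by __gen_actions() above: parsing continues past malformed action
# strings so that as much data as possible is yielded, and the collected
# errors are only raised once iteration completes.  parse_action() is a
# trivial stand-in for pkg.actions.fromstr(), and ValueError stands in for
# api_errors.InvalidPackageErrors.
def parse_action(astr):
    if "=" not in astr:
        raise ValueError("malformed action: {0}".format(astr))
    return tuple(astr.split("=", 1))

def gen_actions(action_strings):
    errors = None
    for astr in action_strings:
        try:
            a = parse_action(astr)
        except ValueError as e:
            if errors is None:
                errors = []          # allocate lazily, as the real code does
            errors.append(e)
            continue
        yield a

    if errors is not None:
        raise ValueError(errors)

good = list(gen_actions(["name=pkg.summary", "value=Vim"]))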
+ if comps[1] not in locales: + continue + yield a + + @staticmethod + def __gen_manifest_actions(m, atypes, excludes): + """Private helper function to iterate over a Manifest's actions + by action type, returning tuples of (action, attr_name).""" + pub = m.publisher + for atype in atypes: + for a in m.gen_actions_by_type(atype): + if not a.include_this(excludes, publisher=pub): + continue + + if atype == "set": + yield a, a.attrs["name"] else: - locales = set(locales) - - for f, entry in self.__entries(cb=cb, info_needed=info_needed, - locales=locales, last_version=last_version, - ordered=ordered, pubs=pubs): - try: - yield f, self.__gen_actions(f, entry["actions"], - excludes) - except KeyError: - if self.__manifest_cb: - yield f, self.__gen_lazy_actions(f, - info_needed, locales, excludes) - else: - yield f, EmptyI - - def __append(self, src, cb=None, pfmri=None, pubs=EmptyI): - """Private version; caller responsible for locking.""" - - base = self.get_part(self.__BASE_PART) - src_base = src.get_part(self.__BASE_PART, must_exist=True) - if src_base is None: - if pfmri: - raise api_errors.UnknownCatalogEntry(pfmri) - # Nothing to do - return - - # Use the same operation time and date for all operations so - # that the last modification times will be synchronized. This - # also has the benefit of avoiding extra datetime object - # instantiations. - op_time = datetime.datetime.utcnow() - - # For each entry in the 'src' catalog, add its BASE entry to the - # current catalog along and then add it to the 'd'iscard dict if - # 'cb' is defined and returns False. - if pfmri: - entry = src_base.get_entry(pfmri) - if entry is None: - raise api_errors.UnknownCatalogEntry( - pfmri.get_fmri()) - entries = [(pfmri, entry)] + yield a, None + + def __get_batch_mode(self): + return self.__batch_mode + + def __get_last_modified(self): + return self._attrs.last_modified + + def __get_meta_root(self): + return self.__meta_root + + def __get_sign(self): + return self.__sign + + def __get_update(self, name, cache=True, must_exist=False): + # First, check if the update has already been cached, + # and if so, return it. + ulog = self.__updates.get(name, None) + if ulog is not None: + return ulog + elif not self.meta_root and must_exist: + return + + # Next, if the update hasn't been cached, + # create an object for it. + ulog = CatalogUpdate(name, meta_root=self.meta_root, sign=self.__sign) + if self.meta_root and must_exist and not ulog.exists: + # Update doesn't exist on-disk, + # so don't return anything. + return + if cache: + self.__updates[name] = ulog + return ulog + + def __get_version(self): + return self._attrs.version + + def __lock_catalog(self): + """Locks the catalog preventing multiple threads or external + consumers of the catalog from modifying it during operations. + """ + + # XXX need filesystem lock too? + self.__lock.acquire() + + def __log_update(self, pfmri, operation, op_time, entries=None): + """Helper function to log catalog changes.""" + + if not self.__batch_mode: + # The catalog.attrs needs to be updated to reflect + # the changes made. A sort doesn't need to be done + # here as the individual parts will automatically do + # that as needed in this case. + self.__finalize(sort=False) + + # This must be set to exactly the same time as the update logs + # so that the changes in the update logs are not marked as + # being newer than the catalog or vice versa. 
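+        # (Clarifying note, not part of the original change: 'op_time'
+        # below becomes both the catalog's last_modified value and the
+        # timestamp embedded in each "update.<ts>.<locale>" log name.)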
+ attrs = self._attrs + attrs.last_modified = op_time + + if not self.log_updates: + return + + updates = {} + for pname in entries: + # The last component of the updatelog filename is the + # related locale. + locale = pname.split(".", 2)[2] + updates.setdefault(locale, {}) + parts = updates[locale] + parts[pname] = entries[pname] + + logdate = datetime_to_update_ts(op_time) + for locale, metadata in six.iteritems(updates): + name = "update.{0}.{1}".format(logdate, locale) + ulog = self.__get_update(name) + ulog.add(pfmri, operation, metadata=metadata, op_time=op_time) + attrs.updates[name] = {"last-modified": op_time} + + for name, part in six.iteritems(self.__parts): + # Signature data for each part needs to be cleared, + # and will only be available again after save(). + attrs.parts[name] = {"last-modified": part.last_modified} + + @staticmethod + def __parse_fmri_patterns(patterns): + """A generator function that yields a list of tuples of the form + (pattern, error, fmri, matcher) based on the provided patterns, + where 'error' is any exception encountered while parsing the + pattern, 'fmri' is the resulting FMRI object, and 'matcher' is + one of the following pkg.fmri matching functions: + + pkg.fmri.exact_name_match + Indicates that the name portion of the pattern + must match exactly and the version (if provided) + must be considered a successor or equal to the + target FMRI. + + pkg.fmri.fmri_match + Indicates that the name portion of the pattern + must be a proper subset and the version (if + provided) must be considered a successor or + equal to the target FMRI. + + pkg.fmri.glob_match + Indicates that the name portion of the pattern + uses fnmatch rules for pattern matching (shell- + style wildcards) and that the version can either + match exactly, match partially, or contain + wildcards. + """ + + for pat in patterns: + error = None + matcher = None + npat = None + try: + parts = pat.split("@", 1) + pat_stem = parts[0] + pat_ver = None + if len(parts) > 1: + pat_ver = parts[1] + + if "*" in pat_stem or "?" in pat_stem: + matcher = fmri.glob_match + elif pat_stem.startswith("pkg:/") or pat_stem.startswith("/"): + matcher = fmri.exact_name_match else: - entries = src_base.entries() - - d = {} - for f, entry in entries: - if pubs and f.publisher not in pubs: - continue - - nentry = copy.deepcopy(entry) - if cb is not None: - merge, mdata = cb(src, f, entry) - if not merge: - pub = d.setdefault(f.publisher, {}) - plist = pub.setdefault(f.pkg_name, - set()) - plist.add(f.version) - continue - - if mdata: - if "metadata" in nentry: - nentry["metadata"].update(mdata) - else: - nentry["metadata"] = mdata - base.add(f, metadata=nentry, op_time=op_time) - - if d and pfmri: - # If the 'd'iscards dict is populated and pfmri is - # defined, then there is nothing more to do. - return - - # Finally, merge any catalog part entries that exist unless the - # FMRI is found in the 'd'iscard dict. - for name in src.parts.keys(): - if name == self.__BASE_PART: - continue - - part = src.get_part(name, must_exist=True) - if part is None: - # Part doesn't exist in-memory or on-disk, so - # skip it. - continue - - if pfmri: - entry = part.get_entry(pfmri) - if entry is None: - # Package isn't in this part; skip it. 
- continue - entries = [(pfmri, entry)] - else: - entries = part.entries() - - npart = self.get_part(name) - for f, entry in entries: - if pubs and f.publisher not in pubs: - continue - if f.publisher in d and \ - f.pkg_name in d[f.publisher] and \ - f.version in d[f.publisher][f.pkg_name]: - # Skip this package. - continue - - nentry = copy.deepcopy(entry) - npart.add(f, metadata=nentry, op_time=op_time) - - def __entries(self, cb=None, info_needed=EmptyI, - last_version=False, locales=None, ordered=False, pubs=EmptyI, - tuples=False): - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - # Catalog contains nothing. - return - - if not locales: - locales = set(("C",)) - else: - locales = set(locales) - - parts = [] - if self.DEPENDENCY in info_needed: - part = self.get_part(self.__DEPS_PART, must_exist=True) - if part is not None: - parts.append(part) - - if self.SUMMARY in info_needed: - for locale in locales: - part = self.get_part( - "{0}.{1}".format(self.__SUMM_PART_PFX, - locale), must_exist=True) - if part is None: - # Data not available for this - # locale. - continue - parts.append(part) - - def merge_entry(src, dest): - for k, v in six.iteritems(src): - if k == "actions": - dest.setdefault(k, []) - dest[k] += v - elif k != "version": - dest[k] = v - - if tuples: - for r, bentry in base.tuple_entries(cb=cb, - last=last_version, ordered=ordered, pubs=pubs): - pub, stem, ver = r - mdata = {} - merge_entry(bentry, mdata) - for part in parts: - entry = part.get_entry(pub=pub, - stem=stem, ver=ver) - if entry is None: - # Part doesn't have this FMRI, - # so skip it. - continue - for k, v in six.iteritems(entry): - if k == "actions": - mdata.setdefault(k, []) - mdata[k] += v - elif k != "version": - mdata[k] = v - yield r, mdata - return - - for f, bentry in base.entries(cb=cb, last=last_version, - ordered=ordered, pubs=pubs): - mdata = {} - merge_entry(bentry, mdata) - for part in parts: - entry = part.get_entry(f) - if entry is None: - # Part doesn't have this FMRI, - # so skip it. - continue - for k, v in six.iteritems(entry): - if k == "actions": - mdata.setdefault(k, []) - mdata[k] += v - elif k != "version": - mdata[k] = v - yield f, mdata - - def __finalize(self, pfmris=None, pubs=None, sort=True): - """Private finalize method; exposes additional controls for - internal callers.""" - - package_count = 0 - package_version_count = 0 - - part = self.get_part(self.__BASE_PART, must_exist=True) - if part is not None: - # If the base Catalog didn't exist (in-memory or on- - # disk) that implies there is nothing to sort and - # there are no packages (since the base catalog part - # must always exist for packages to be present). - package_count, package_version_count = \ - part.get_package_counts() - - if sort: - # Some operations don't need this, such as - # remove... - for part in self.__parts.values(): - part.sort(pfmris=pfmris, pubs=pubs) - - self._attrs.package_count = package_count - self._attrs.package_version_count = \ - package_version_count - - @staticmethod - def __gen_actions(pfmri, actions, excludes=EmptyI): - errors = None - if not isinstance(pfmri, fmri.PkgFmri): - # pfmri is assumed to be a FMRI tuple. 
- pub, stem, ver = pfmri + matcher = fmri.fmri_match + + if matcher == fmri.glob_match: + npat = fmri.MatchingPkgFmri(pat_stem) else: - pub = pfmri.publisher - for astr in actions: - try: - a = pkg.actions.fromstr(astr) - except pkg.actions.ActionError as e: - # Accumulate errors and continue so that as - # much of the action data as possible can be - # parsed. - if errors is None: - # Allocate this here to avoid overhead - # of list allocation/deallocation. - errors = [] - if not isinstance(pfmri, fmri.PkgFmri): - pfmri = fmri.PkgFmri(name=stem, - publisher=pub, version=ver) - e.fmri = pfmri - errors.append(e) - continue - - if a.name == "set" and \ - (a.attrs["name"].startswith("facet") or - a.attrs["name"].startswith("variant")): - # Don't filter actual facet or variant - # set actions. - yield a - elif a.include_this(excludes, - publisher=pub): - yield a - - if errors is not None: - raise api_errors.InvalidPackageErrors(errors) - - def __gen_lazy_actions(self, f, info_needed, locales=EmptyI, - excludes=EmptyI): - # Note that the logic below must be kept in sync with - # group_actions found in add_package. - m = self.__manifest_cb(self, f) - if not m: - # If the manifest callback returns None, then - # assume there is no action data to yield. - return + npat = fmri.PkgFmri(pat_stem) - if Catalog.DEPENDENCY in info_needed: - atypes = ("depend", "set") - elif Catalog.SUMMARY in info_needed: - atypes = ("set",) + if not pat_ver: + # Do nothing. + pass + elif "*" in pat_ver or "?" in pat_ver or pat_ver == "latest": + npat.version = pkg.version.MatchingVersion(pat_ver) else: - raise RuntimeError(_("Unknown info_needed " - "type: {0}".format(info_needed))) - - for a, attr_name in self.__gen_manifest_actions(m, atypes, - excludes): - if (a.name == "depend" or \ - attr_name.startswith("variant") or \ - attr_name.startswith("facet") or \ - attr_name.startswith("pkg.depend.") or \ - attr_name in ("pkg.obsolete", - "pkg.renamed")): - if Catalog.DEPENDENCY in info_needed: - yield a - elif Catalog.SUMMARY in info_needed and a.name == "set": - if attr_name in ("fmri", "pkg.fmri", - "publisher") or attr_name.startswith(( - "info.source-url", "pkg.debug", - "pkg.linted")): - continue - - comps = attr_name.split(":") - if len(comps) > 1: - # 'set' is locale-specific. - if comps[1] not in locales: - continue - yield a - - @staticmethod - def __gen_manifest_actions(m, atypes, excludes): - """Private helper function to iterate over a Manifest's actions - by action type, returning tuples of (action, attr_name).""" - pub = m.publisher - for atype in atypes: - for a in m.gen_actions_by_type(atype): - if not a.include_this(excludes, - publisher=pub): - continue - - if atype == "set": - yield a, a.attrs["name"] - else: - yield a, None - - def __get_batch_mode(self): - return self.__batch_mode - - def __get_last_modified(self): - return self._attrs.last_modified - - def __get_meta_root(self): - return self.__meta_root - - def __get_sign(self): - return self.__sign - - def __get_update(self, name, cache=True, must_exist=False): - # First, check if the update has already been cached, - # and if so, return it. - ulog = self.__updates.get(name, None) - if ulog is not None: - return ulog - elif not self.meta_root and must_exist: - return - - # Next, if the update hasn't been cached, - # create an object for it. - ulog = CatalogUpdate(name, meta_root=self.meta_root, - sign=self.__sign) - if self.meta_root and must_exist and not ulog.exists: - # Update doesn't exist on-disk, - # so don't return anything. 
- return - if cache: - self.__updates[name] = ulog - return ulog - - def __get_version(self): - return self._attrs.version - - def __lock_catalog(self): - """Locks the catalog preventing multiple threads or external - consumers of the catalog from modifying it during operations. - """ - - # XXX need filesystem lock too? - self.__lock.acquire() - - def __log_update(self, pfmri, operation, op_time, entries=None): - """Helper function to log catalog changes.""" - - if not self.__batch_mode: - # The catalog.attrs needs to be updated to reflect - # the changes made. A sort doesn't need to be done - # here as the individual parts will automatically do - # that as needed in this case. - self.__finalize(sort=False) - - # This must be set to exactly the same time as the update logs - # so that the changes in the update logs are not marked as - # being newer than the catalog or vice versa. - attrs = self._attrs - attrs.last_modified = op_time - - if not self.log_updates: - return - - updates = {} - for pname in entries: - # The last component of the updatelog filename is the - # related locale. - locale = pname.split(".", 2)[2] - updates.setdefault(locale, {}) - parts = updates[locale] - parts[pname] = entries[pname] - - logdate = datetime_to_update_ts(op_time) - for locale, metadata in six.iteritems(updates): - name = "update.{0}.{1}".format(logdate, locale) - ulog = self.__get_update(name) - ulog.add(pfmri, operation, metadata=metadata, - op_time=op_time) - attrs.updates[name] = { - "last-modified": op_time - } - - for name, part in six.iteritems(self.__parts): - # Signature data for each part needs to be cleared, - # and will only be available again after save(). - attrs.parts[name] = { - "last-modified": part.last_modified - } - - @staticmethod - def __parse_fmri_patterns(patterns): - """A generator function that yields a list of tuples of the form - (pattern, error, fmri, matcher) based on the provided patterns, - where 'error' is any exception encountered while parsing the - pattern, 'fmri' is the resulting FMRI object, and 'matcher' is - one of the following pkg.fmri matching functions: - - pkg.fmri.exact_name_match - Indicates that the name portion of the pattern - must match exactly and the version (if provided) - must be considered a successor or equal to the - target FMRI. - - pkg.fmri.fmri_match - Indicates that the name portion of the pattern - must be a proper subset and the version (if - provided) must be considered a successor or - equal to the target FMRI. - - pkg.fmri.glob_match - Indicates that the name portion of the pattern - uses fnmatch rules for pattern matching (shell- - style wildcards) and that the version can either - match exactly, match partially, or contain - wildcards. - """ + npat.version = pkg.version.Version(pat_ver) + + except (fmri.FmriError, pkg.version.VersionError) as e: + # Whatever the error was, return it. + error = e + yield (pat, error, npat, matcher) + + def __save(self, fmt="utf8"): + """Private save function. Caller is responsible for locking + the catalog.""" + + attrs = self._attrs + if self.log_updates: + for name, ulog in six.iteritems(self.__updates): + ulog.load() + ulog.set_feature(FEATURE_UTF8, fmt == "utf8") + ulog.save() + + # Replace the existing signature data + # with the new signature data. 
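+                # (Clarifying note, not part of the original change:
+                # each signature value is stored next to
+                # "last-modified" under a key of the form
+                # "signature-<name>".)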
+ entry = attrs.updates[name] = { + "last-modified": ulog.last_modified + } + for n, v in six.iteritems(ulog.signatures): + entry["signature-{0}".format(n)] = v + + # Save any CatalogParts that are currently in-memory, + # updating their related information in catalog.attrs + # as they are saved. + for name, part in six.iteritems(self.__parts): + # Must save first so that signature data is + # current. + + # single-pass encoding is not used for summary part as + # it increases memory usage substantially (30MB at + # current for /dev). No significant difference is + # detectable for other parts though. + part.load() + part.set_feature(FEATURE_UTF8, fmt == "utf8") + part.save() + + # Now replace the existing signature data with + # the new signature data. + entry = attrs.parts[name] = {"last-modified": part.last_modified} + for n, v in six.iteritems(part.signatures): + entry["signature-{0}".format(n)] = v + + # Finally, save the catalog attributes. + attrs.load() + attrs.set_feature(FEATURE_UTF8, fmt == "utf8") + attrs.save() + + def __set_batch_mode(self, value): + self.__batch_mode = value + for part in self.__parts.values(): + part.ordered = not self.__batch_mode + + def __set_last_modified(self, value): + self._attrs.last_modified = value + + def __set_meta_root(self, pathname): + if pathname: + pathname = os.path.abspath(pathname) + self.__meta_root = pathname + + # If the Catalog's meta_root changes, the meta_root of all of + # its parts must be changed too. + if self._attrs: + self._attrs.meta_root = pathname + + for part in self.__parts.values(): + part.meta_root = pathname + + for ulog in self.__updates.values(): + ulog.meta_root = pathname + + def __set_perms(self): + """Sets permissions on attrs and parts if not read_only and if + the current user can do so; raises BadCatalogPermissions if the + permissions are wrong and cannot be corrected.""" + + if not self.meta_root: + # Nothing to do. + return + + files = [self._attrs.name] + files.extend(self._attrs.parts.keys()) + files.extend(self._attrs.updates.keys()) + + # Force file_mode, so that unprivileged users can read these. + bad_modes = [] + for name in files: + pathname = os.path.join(self.meta_root, name) + try: + if self.read_only: + fmode = stat.S_IMODE(os.stat(pathname).st_mode) + if fmode != self.__file_mode: + bad_modes.append( + ( + pathname, + "{0:o}".format(self.__file_mode), + "{0:o}".format(fmode), + ) + ) + else: + os.chmod(pathname, self.__file_mode) + except EnvironmentError as e: + # If the file doesn't exist yet, move on. + if e.errno == errno.ENOENT: + continue + + # If the mode change failed for another reason, + # check to see if we actually needed to change + # it, and if so, add it to bad_modes. + fmode = stat.S_IMODE(os.stat(pathname).st_mode) + if fmode != self.__file_mode: + bad_modes.append( + ( + pathname, + "{0:o}".format(self.__file_mode), + "{0:o}".format(fmode), + ) + ) - for pat in patterns: - error = None - matcher = None - npat = None - try: - parts = pat.split("@", 1) - pat_stem = parts[0] - pat_ver = None - if len(parts) > 1: - pat_ver = parts[1] - - if "*" in pat_stem or "?" in pat_stem: - matcher = fmri.glob_match - elif pat_stem.startswith("pkg:/") or \ - pat_stem.startswith("/"): - matcher = fmri.exact_name_match - else: - matcher = fmri.fmri_match - - if matcher == fmri.glob_match: - npat = fmri.MatchingPkgFmri(pat_stem) - else: - npat = fmri.PkgFmri(pat_stem) - - if not pat_ver: - # Do nothing. - pass - elif "*" in pat_ver or "?" 
in pat_ver or \ - pat_ver == "latest": - npat.version = \ - pkg.version.MatchingVersion(pat_ver) - else: - npat.version = \ - pkg.version.Version(pat_ver) - - except (fmri.FmriError, pkg.version.VersionError) as e: - # Whatever the error was, return it. - error = e - yield (pat, error, npat, matcher) - - def __save(self, fmt='utf8'): - """Private save function. Caller is responsible for locking - the catalog.""" - - attrs = self._attrs - if self.log_updates: - for name, ulog in six.iteritems(self.__updates): - ulog.load() - ulog.set_feature(FEATURE_UTF8, fmt == 'utf8') - ulog.save() - - # Replace the existing signature data - # with the new signature data. - entry = attrs.updates[name] = { - "last-modified": ulog.last_modified - } - for n, v in six.iteritems(ulog.signatures): - entry["signature-{0}".format(n)] = v - - # Save any CatalogParts that are currently in-memory, - # updating their related information in catalog.attrs - # as they are saved. - for name, part in six.iteritems(self.__parts): - # Must save first so that signature data is - # current. - - # single-pass encoding is not used for summary part as - # it increases memory usage substantially (30MB at - # current for /dev). No significant difference is - # detectable for other parts though. - part.load() - part.set_feature(FEATURE_UTF8, fmt == 'utf8') - part.save() - - # Now replace the existing signature data with - # the new signature data. - entry = attrs.parts[name] = { - "last-modified": part.last_modified - } - for n, v in six.iteritems(part.signatures): - entry["signature-{0}".format(n)] = v - - # Finally, save the catalog attributes. - attrs.load() - attrs.set_feature(FEATURE_UTF8, fmt == 'utf8') - attrs.save() - - def __set_batch_mode(self, value): - self.__batch_mode = value - for part in self.__parts.values(): - part.ordered = not self.__batch_mode + if bad_modes: + raise api_errors.BadCatalogPermissions(bad_modes) + + def __set_sign(self, value): + self.__sign = value + + # If the Catalog's sign property changes, the value of that + # property for its attributes, etc. must be changed too. + if self._attrs: + self._attrs.sign = value + + for part in self.__parts.values(): + part.sign = value + + for ulog in self.__updates.values(): + ulog.sign = value + + def __set_version(self, value): + self._attrs.version = value + + def __unlock_catalog(self): + """Unlocks the catalog allowing other catalog consumers to + modify it.""" + + # XXX need filesystem unlock too? + self.__lock.release() + + def actions( + self, + info_needed, + excludes=EmptyI, + cb=None, + last=False, + locales=None, + ordered=False, + pubs=EmptyI, + ): + """A generator function that produces tuples of the format + (fmri, actions) as it iterates over the contents of the + catalog (where 'actions' is a generator that returns the + Actions corresponding to the requested information). + + If the catalog doesn't contain any action data for the package + entry, and manifest_cb was defined at Catalog creation time, + the action data will be lazy-loaded by the actions generator; + otherwise it will return an empty iterator. This means that + the manifest_cb will be executed even for packages that don't + actually have any actions corresponding to info_needed. For + example, if a package doesn't have any dependencies, the + manifest_cb will still be executed. This was considered a + reasonable compromise as packages are generally expected to + have DEPENDENCY and SUMMARY information. 
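+
+        An illustrative sketch of typical use (not part of the
+        original documentation; 'cat' is assumed to be a populated
+        Catalog instance):
+
+            for f, acts in cat.actions([Catalog.DEPENDENCY]):
+                for a in acts:
+                    ...  # each 'a' is a depend or set Action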
+ + 'excludes' is a list of variants which will be used to determine + what should be allowed by the actions generator in addition to + what is specified by 'info_needed'. + + 'cb' is an optional callback function that will be executed for + each package before its action data is retrieved. It must accept + two arguments: 'pkg' and 'entry'. 'pkg' is an FMRI object and + 'entry' is the dictionary structure of the catalog entry for the + package. If the callback returns False, then the entry will not + be included in the results. This can significantly improve + performance by avoiding action data retrieval for results that + will not be used. + + 'info_needed' is a set of one or more catalog constants + indicating the types of catalog data that will be returned + in 'actions' in addition to the above: + + DEPENDENCY + Depend and set Actions for package obsoletion, + renaming, variants. + + SUMMARY + Any remaining set Actions not listed above, such + as pkg.summary, pkg.description, etc. + + 'last' is a boolean value that indicates only the last entry + for each package on a per-publisher basis should be returned. + As long as the catalog has been saved since the last modifying + operation, or finalize() has has been called, this will also be + the newest version of the package. + + 'locales' is an optional set of locale names for which Actions + should be returned. The default is set(('C',)) if not provided. + + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. + + 'pfmri' is an optional FMRI to limit the returned results to. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + return self.__actions( + info_needed, + excludes=excludes, + cb=cb, + last_version=last, + locales=locales, + ordered=ordered, + pubs=pubs, + ) + + def add_package(self, pfmri, manifest=None, metadata=None): + """Add a package and its related metadata to the catalog and + its parts as needed. + + 'manifest' is an optional Manifest object that will be used + to retrieve the metadata related to the package. + + 'metadata' is an optional dict of additional metadata to store + with the package's BASE record.""" + + assert not self.read_only + + def group_actions(actions): + dep_acts = {"C": []} + # Summary actions are grouped by locale, since each + # goes to a locale-specific catalog part. + sum_acts = {"C": []} + for act in actions: + if act.name == "depend": + dep_acts["C"].append(str(act)) + continue + + name = act.attrs["name"] + if ( + name.startswith("variant") + or name.startswith("facet") + or name.startswith("pkg.depend.") + or name in ("pkg.obsolete", "pkg.renamed", "pkg.legacy") + ): + # variant and facet data goes to the + # dependency catalog part. + dep_acts["C"].append(str(act)) + continue + elif name in ("fmri", "pkg.fmri"): + # Redundant in the case of the catalog. + continue + + if name in ("fmri", "pkg.fmri", "publisher") or name.startswith( + ("info.source-url", "pkg.debug", "pkg.linted") + ): + continue + + # All other set actions go to the summary + # catalog parts, grouped by locale. To + # determine the locale, the set attribute's + # name is split by ':' into its field and + # locale components. If ':' is not present, + # then the 'C' locale is assumed. 
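+                # (Illustrative example, not part of the original
+                # change: "pkg.summary:fr" is filed under locale
+                # "fr", while a plain "pkg.summary" falls into the
+                # "C" locale.)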
+ comps = name.split(":") + if len(comps) > 1: + locale = comps[1] + else: + locale = "C" + if locale not in sum_acts: + sum_acts[locale] = [] + sum_acts[locale].append(str(act)) - def __set_last_modified(self, value): - self._attrs.last_modified = value + return { + "dependency": dep_acts, + "summary": sum_acts, + } - def __set_meta_root(self, pathname): - if pathname: - pathname = os.path.abspath(pathname) - self.__meta_root = pathname + self.__lock_catalog() + try: + entries = {} + # Use the same operation time and date for all + # operations so that the last modification times + # of all catalog parts and update logs will be + # synchronized. + op_time = datetime.datetime.utcnow() + + # Always add packages to the base catalog. + entry = {} + if metadata: + entry["metadata"] = metadata + if manifest: + for k, v in six.iteritems(manifest.signatures): + entry["signature-{0}".format(k)] = v + part = self.get_part(self.__BASE_PART) + entries[part.name] = part.add( + pfmri, metadata=entry, op_time=op_time + ) + + if manifest: + # Without a manifest, only the base catalog data + # can be populated. + + # Only dependency and set actions are currently + # used by the remaining catalog parts. + actions = [] + for atype in "depend", "set": + actions += manifest.gen_actions_by_type(atype) + + gacts = group_actions(actions) + for ctype in gacts: + for locale in gacts[ctype]: + acts = gacts[ctype][locale] + if not acts: + # Catalog entries only + # added if actions are + # present for this + # ctype. + continue + + part = self.get_part( + "catalog" ".{0}.{1}".format(ctype, locale) + ) + entry = {"actions": acts} + entries[part.name] = part.add( + pfmri, metadata=entry, op_time=op_time + ) - # If the Catalog's meta_root changes, the meta_root of all of - # its parts must be changed too. - if self._attrs: - self._attrs.meta_root = pathname + self.__log_update( + pfmri, CatalogUpdate.ADD, op_time, entries=entries + ) + finally: + self.__unlock_catalog() + + def append(self, src, cb=None, pfmri=None, pubs=EmptyI): + """Appends the entries in the specified 'src' catalog to that + of the current catalog. The caller is responsible for ensuring + that no duplicates exist and must call finalize() afterwards to + to ensure consistent catalog state. This function cannot be + used when log_updates or read_only is enabled. + + 'cb' is an optional callback function that must accept src, + an FMRI, and entry. Where 'src' is the source catalog the + FMRI's entry is being copied from, and entry is the source + catalog entry. It must return a tuple of the form (append, + metadata), where 'append' is a boolean value indicating if + the specified package should be appended, and 'metadata' is + a dict of additional metadata to store with the package's + BASE record. + + 'pfmri' is an optional FMRI of a package to append. If not + provided, all FMRIs in the 'src' catalog will be appended. + This filtering is applied before any provided callback. + + 'pubs' is an optional list of publisher prefixes to restrict + the append operation to. FRMIs that have a publisher not in + the list will be skipped. This filtering is applied before + any provided callback. If not provided, no publisher + filtering will be applied.""" + + assert not self.log_updates and not self.read_only + + self.__lock_catalog() + try: + # Append operations are much slower if batch mode is + # not enabled. This ensures that the current state + # is stored and then reset on completion or failure. 
+ # Since append() is never used as part of the + # publication process (log_updates == True), + # this is safe. + old_batch_mode = self.batch_mode + self.batch_mode = True + self.__append(src, cb=cb, pfmri=pfmri, pubs=pubs) + finally: + self.batch_mode = old_batch_mode + self.__unlock_catalog() + + def apply_updates(self, path): + """Apply any CatalogUpdates available to the catalog based on + the list returned by get_updates_needed. The caller must + retrieve all of the resources indicated by get_updates_needed + and place them in the directory indicated by 'path'.""" + + if not self.meta_root: + raise api_errors.CatalogUpdateRequirements() + + # Used to store the original time each part was modified + # as a basis for determining whether to apply specific + # updates. + old_parts = self._attrs.parts + + def apply_incremental(name): + # Load the CatalogUpdate from the path specified. + # (Which is why __get_update is not used.) + ulog = CatalogUpdate(name, meta_root=path) + for pfmri, op_type, op_time, metadata in ulog.updates(): + for pname, pdata in six.iteritems(metadata): + part = self.get_part(pname, must_exist=True) + if part is None: + # Part doesn't exist; skip. + continue + + lm = old_parts[pname]["last-modified"] + if op_time <= lm: + # Only add updates to the part + # that occurred after the last + # time it was originally + # modified. + continue + + if op_type == CatalogUpdate.ADD: + part.add(pfmri, metadata=pdata, op_time=op_time) + elif op_type == CatalogUpdate.REMOVE: + part.remove(pfmri, op_time=op_time) + else: + raise api_errors.UnknownUpdateType(op_type) + + def apply_full(name): + src = os.path.join(path, name) + dest = os.path.join(self.meta_root, name) + portable.copyfile(src, dest) + + self.__lock_catalog() + try: + old_batch_mode = self.batch_mode + self.batch_mode = True - for part in self.__parts.values(): - part.meta_root = pathname - - for ulog in self.__updates.values(): - ulog.meta_root = pathname - - def __set_perms(self): - """Sets permissions on attrs and parts if not read_only and if - the current user can do so; raises BadCatalogPermissions if the - permissions are wrong and cannot be corrected.""" - - if not self.meta_root: - # Nothing to do. - return - - files = [self._attrs.name] - files.extend(self._attrs.parts.keys()) - files.extend(self._attrs.updates.keys()) - - # Force file_mode, so that unprivileged users can read these. - bad_modes = [] - for name in files: - pathname = os.path.join(self.meta_root, name) - try: - if self.read_only: - fmode = stat.S_IMODE(os.stat( - pathname).st_mode) - if fmode != self.__file_mode: - bad_modes.append((pathname, - "{0:o}".format( - self.__file_mode), - "{0:o}".format(fmode))) - else: - os.chmod(pathname, self.__file_mode) - except EnvironmentError as e: - # If the file doesn't exist yet, move on. - if e.errno == errno.ENOENT: - continue - - # If the mode change failed for another reason, - # check to see if we actually needed to change - # it, and if so, add it to bad_modes. - fmode = stat.S_IMODE(os.stat( - pathname).st_mode) - if fmode != self.__file_mode: - bad_modes.append((pathname, - "{0:o}".format(self.__file_mode), - "{0:o}".format(fmode))) - - if bad_modes: - raise api_errors.BadCatalogPermissions(bad_modes) - - def __set_sign(self, value): - self.__sign = value - - # If the Catalog's sign property changes, the value of that - # property for its attributes, etc. must be changed too. 
- if self._attrs: - self._attrs.sign = value + updates = self.get_updates_needed(path) + if updates == None: + # Nothing has changed, so nothing to do. + return - for part in self.__parts.values(): - part.sign = value - - for ulog in self.__updates.values(): - ulog.sign = value - - def __set_version(self, value): - self._attrs.version = value - - def __unlock_catalog(self): - """Unlocks the catalog allowing other catalog consumers to - modify it.""" - - # XXX need filesystem unlock too? - self.__lock.release() - - def actions(self, info_needed, excludes=EmptyI, cb=None, - last=False, locales=None, ordered=False, pubs=EmptyI): - """A generator function that produces tuples of the format - (fmri, actions) as it iterates over the contents of the - catalog (where 'actions' is a generator that returns the - Actions corresponding to the requested information). - - If the catalog doesn't contain any action data for the package - entry, and manifest_cb was defined at Catalog creation time, - the action data will be lazy-loaded by the actions generator; - otherwise it will return an empty iterator. This means that - the manifest_cb will be executed even for packages that don't - actually have any actions corresponding to info_needed. For - example, if a package doesn't have any dependencies, the - manifest_cb will still be executed. This was considered a - reasonable compromise as packages are generally expected to - have DEPENDENCY and SUMMARY information. - - 'excludes' is a list of variants which will be used to determine - what should be allowed by the actions generator in addition to - what is specified by 'info_needed'. - - 'cb' is an optional callback function that will be executed for - each package before its action data is retrieved. It must accept - two arguments: 'pkg' and 'entry'. 'pkg' is an FMRI object and - 'entry' is the dictionary structure of the catalog entry for the - package. If the callback returns False, then the entry will not - be included in the results. This can significantly improve - performance by avoiding action data retrieval for results that - will not be used. - - 'info_needed' is a set of one or more catalog constants - indicating the types of catalog data that will be returned - in 'actions' in addition to the above: - - DEPENDENCY - Depend and set Actions for package obsoletion, - renaming, variants. - - SUMMARY - Any remaining set Actions not listed above, such - as pkg.summary, pkg.description, etc. - - 'last' is a boolean value that indicates only the last entry - for each package on a per-publisher basis should be returned. - As long as the catalog has been saved since the last modifying - operation, or finalize() has has been called, this will also be - the newest version of the package. - - 'locales' is an optional set of locale names for which Actions - should be returned. The default is set(('C',)) if not provided. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. - - 'pfmri' is an optional FMRI to limit the returned results to. 
- - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - return self.__actions(info_needed, excludes=excludes, - cb=cb, last_version=last, locales=locales, ordered=ordered, - pubs=pubs) - - def add_package(self, pfmri, manifest=None, metadata=None): - """Add a package and its related metadata to the catalog and - its parts as needed. - - 'manifest' is an optional Manifest object that will be used - to retrieve the metadata related to the package. - - 'metadata' is an optional dict of additional metadata to store - with the package's BASE record.""" - - assert not self.read_only - - def group_actions(actions): - dep_acts = { "C": [] } - # Summary actions are grouped by locale, since each - # goes to a locale-specific catalog part. - sum_acts = { "C": [] } - for act in actions: - if act.name == "depend": - dep_acts["C"].append(str(act)) - continue - - name = act.attrs["name"] - if name.startswith("variant") or \ - name.startswith("facet") or \ - name.startswith("pkg.depend.") or \ - name in ("pkg.obsolete", "pkg.renamed", "pkg.legacy"): - # variant and facet data goes to the - # dependency catalog part. - dep_acts["C"].append(str(act)) - continue - elif name in ("fmri", "pkg.fmri"): - # Redundant in the case of the catalog. - continue - - if name in ("fmri", "pkg.fmri", - "publisher") or name.startswith(( - "info.source-url", "pkg.debug", - "pkg.linted")): - continue - - # All other set actions go to the summary - # catalog parts, grouped by locale. To - # determine the locale, the set attribute's - # name is split by ':' into its field and - # locale components. If ':' is not present, - # then the 'C' locale is assumed. - comps = name.split(":") - if len(comps) > 1: - locale = comps[1] - else: - locale = "C" - if locale not in sum_acts: - sum_acts[locale] = [] - sum_acts[locale].append(str(act)) - - return { - "dependency": dep_acts, - "summary": sum_acts, - } - - self.__lock_catalog() - try: - entries = {} - # Use the same operation time and date for all - # operations so that the last modification times - # of all catalog parts and update logs will be - # synchronized. - op_time = datetime.datetime.utcnow() - - # Always add packages to the base catalog. - entry = {} - if metadata: - entry["metadata"] = metadata - if manifest: - for k, v in six.iteritems(manifest.signatures): - entry["signature-{0}".format(k)] = v - part = self.get_part(self.__BASE_PART) - entries[part.name] = part.add(pfmri, metadata=entry, - op_time=op_time) - - if manifest: - # Without a manifest, only the base catalog data - # can be populated. - - # Only dependency and set actions are currently - # used by the remaining catalog parts. - actions = [] - for atype in "depend", "set": - actions += manifest.gen_actions_by_type( - atype) - - gacts = group_actions(actions) - for ctype in gacts: - for locale in gacts[ctype]: - acts = gacts[ctype][locale] - if not acts: - # Catalog entries only - # added if actions are - # present for this - # ctype. - continue - - part = self.get_part("catalog" - ".{0}.{1}".format(ctype, - locale)) - entry = { "actions": acts } - entries[part.name] = part.add( - pfmri, metadata=entry, - op_time=op_time) - - self.__log_update(pfmri, CatalogUpdate.ADD, op_time, - entries=entries) - finally: - self.__unlock_catalog() - - def append(self, src, cb=None, pfmri=None, pubs=EmptyI): - """Appends the entries in the specified 'src' catalog to that - of the current catalog. 
The caller is responsible for ensuring - that no duplicates exist and must call finalize() afterwards to - to ensure consistent catalog state. This function cannot be - used when log_updates or read_only is enabled. - - 'cb' is an optional callback function that must accept src, - an FMRI, and entry. Where 'src' is the source catalog the - FMRI's entry is being copied from, and entry is the source - catalog entry. It must return a tuple of the form (append, - metadata), where 'append' is a boolean value indicating if - the specified package should be appended, and 'metadata' is - a dict of additional metadata to store with the package's - BASE record. - - 'pfmri' is an optional FMRI of a package to append. If not - provided, all FMRIs in the 'src' catalog will be appended. - This filtering is applied before any provided callback. - - 'pubs' is an optional list of publisher prefixes to restrict - the append operation to. FRMIs that have a publisher not in - the list will be skipped. This filtering is applied before - any provided callback. If not provided, no publisher - filtering will be applied.""" - - assert not self.log_updates and not self.read_only - - self.__lock_catalog() - try: - # Append operations are much slower if batch mode is - # not enabled. This ensures that the current state - # is stored and then reset on completion or failure. - # Since append() is never used as part of the - # publication process (log_updates == True), - # this is safe. - old_batch_mode = self.batch_mode - self.batch_mode = True - self.__append(src, cb=cb, pfmri=pfmri, pubs=pubs) - finally: - self.batch_mode = old_batch_mode - self.__unlock_catalog() - - def apply_updates(self, path): - """Apply any CatalogUpdates available to the catalog based on - the list returned by get_updates_needed. The caller must - retrieve all of the resources indicated by get_updates_needed - and place them in the directory indicated by 'path'.""" - - if not self.meta_root: - raise api_errors.CatalogUpdateRequirements() - - # Used to store the original time each part was modified - # as a basis for determining whether to apply specific - # updates. - old_parts = self._attrs.parts - def apply_incremental(name): - # Load the CatalogUpdate from the path specified. - # (Which is why __get_update is not used.) - ulog = CatalogUpdate(name, meta_root=path) - for pfmri, op_type, op_time, metadata in ulog.updates(): - for pname, pdata in six.iteritems(metadata): - part = self.get_part(pname, - must_exist=True) - if part is None: - # Part doesn't exist; skip. - continue - - lm = old_parts[pname]["last-modified"] - if op_time <= lm: - # Only add updates to the part - # that occurred after the last - # time it was originally - # modified. - continue - - if op_type == CatalogUpdate.ADD: - part.add(pfmri, metadata=pdata, - op_time=op_time) - elif op_type == CatalogUpdate.REMOVE: - part.remove(pfmri, - op_time=op_time) - else: - raise api_errors.UnknownUpdateType( - op_type) - - def apply_full(name): - src = os.path.join(path, name) - dest = os.path.join(self.meta_root, name) - portable.copyfile(src, dest) - - self.__lock_catalog() - try: - old_batch_mode = self.batch_mode - self.batch_mode = True - - updates = self.get_updates_needed(path) - if updates == None: - # Nothing has changed, so nothing to do. - return - - for name in updates: - if name.startswith("update."): - # The provided update is an incremental. - apply_incremental(name) - else: - # The provided update is a full update. 
- apply_full(name) - - # Next, verify that all of the updated parts have a - # signature that matches the new catalog.attrs file. - new_attrs = CatalogAttrs(meta_root=path) - new_sigs = {} - for name, mdata in six.iteritems(new_attrs.parts): - new_sigs[name] = {} - for key in mdata: - if not key.startswith("signature-"): - continue - sig = key.split("signature-")[1] - new_sigs[name][sig] = mdata[key] - - # This must be done to ensure that the catalog - # signature matches that of the source. - self.batch_mode = old_batch_mode - self.finalize() - - for name, part in six.iteritems(self.__parts): - part.validate(signatures=new_sigs[name]) - - # Finally, save the catalog, and then copy the new - # catalog attributes file into place and reload it. - self.__save() - apply_full(self._attrs.name) - - self._attrs = CatalogAttrs(meta_root=self.meta_root) - self.__set_perms() - finally: - self.batch_mode = old_batch_mode - self.__unlock_catalog() - - def categories(self, excludes=EmptyI, pubs=EmptyI): - """Returns a set of tuples of the form (scheme, category) - containing the names of all categories in use by the last - version of each unique package in the catalog on a per- - publisher basis. - - 'excludes' is a list of variants which will be used to - determine what category actions will be checked. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - acts = self.__actions([self.SUMMARY], excludes=excludes, - last_version=True, pubs=pubs) - return set(( - sc - for f, acts in acts - for a in acts - if a.has_category_info() - for sc in a.parse_category_info() - )) - - @property - def created(self): - """A UTC datetime object indicating the time the catalog was - created.""" - return self._attrs.created - - def destroy(self): - """Removes any on-disk files that exist for the catalog and - discards all content.""" - - for name in self._attrs.parts: - part = self.get_part(name) - part.destroy() - - for name in self._attrs.updates: - ulog = self.__get_update(name, cache=False) - ulog.destroy() - - self._attrs = CatalogAttrs(meta_root=self.meta_root, - sign=self.__sign) - self.__parts = {} - self.__updates = {} - self._attrs.destroy() - - if not self.meta_root or not os.path.exists(self.meta_root): - return - - # Finally, ensure that if there are any leftover files from - # an interrupted destroy in the past that they are removed - # as well. - for fname in os.listdir(self.meta_root): - if not fname.startswith("catalog.") and \ - not fname.startswith("update."): - continue - - pname = os.path.join(self.meta_root, fname) - if not os.path.isfile(pname): - continue - - try: - portable.remove(pname) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - raise - - def entries(self, info_needed=EmptyI, last=False, locales=None, - ordered=False, pubs=EmptyI): - """A generator function that produces tuples of the format - (fmri, metadata) as it iterates over the contents of the - catalog (where 'metadata' is a dict containing the requested - information). - - 'metadata' always contains the following information at a - minimum: - - BASE - 'metadata' will be populated with Manifest - signature data, if available, using key-value - pairs of the form 'signature-': value. 
- - 'info_needed' is an optional list of one or more catalog - constants indicating the types of catalog data that will - be returned in 'metadata' in addition to the above: - - DEPENDENCY - 'metadata' will contain depend and set Actions - for package obsoletion, renaming, variants, - and facets stored in a list under the - key 'actions'. - - SUMMARY - 'metadata' will contain any remaining Actions - not listed above, such as pkg.summary, - pkg.description, etc. in a list under the key - 'actions'. - - 'last' is a boolean value that indicates only the last entry - for each package on a per-publisher basis should be returned. - As long as the catalog has been saved since the last modifying - operation, or finalize() has has been called, this will also be - the newest version of the package. - - 'locales' is an optional set of locale names for which Actions - should be returned. The default is set(('C',)) if not provided. - Note that unlike actions(), catalog entries will not lazy-load - action data if it is missing from the catalog. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - return self.__entries(info_needed=info_needed, - last_version=last, locales=locales, ordered=ordered, - pubs=pubs) - - def entries_by_version(self, name, info_needed=EmptyI, locales=None, - pubs=EmptyI): - """A generator function that produces tuples of the format - (version, entries) as it iterates over the contents of the - the catalog, where entries is a list of tuples of the format - (fmri, metadata) and metadata is a dict containing the - requested information. - - 'metadata' always contains the following information at a - minimum: - - BASE - 'metadata' will be populated with Manifest - signature data, if available, using key-value - pairs of the form 'signature-': value. - - 'info_needed' is an optional list of one or more catalog - constants indicating the types of catalog data that will - be returned in 'metadata' in addition to the above: - - DEPENDENCY - 'metadata' will contain depend and set Actions - for package obsoletion, renaming, variants, - and facets stored in a list under the - key 'actions'. - - SUMMARY - 'metadata' will contain any remaining Actions - not listed above, such as pkg.summary, - pkg.description, etc. in a list under the key - 'actions'. - - 'locales' is an optional set of locale names for which Actions - should be returned. The default is set(('C',)) if not provided. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - # Catalog contains nothing. - return - - if not locales: - locales = set(("C",)) + for name in updates: + if name.startswith("update."): + # The provided update is an incremental. + apply_incremental(name) else: - locales = set(locales) - - parts = [] - if self.DEPENDENCY in info_needed: - part = self.get_part(self.__DEPS_PART, must_exist=True) - if part is not None: - parts.append(part) - - if self.SUMMARY in info_needed: - for locale in locales: - part = self.get_part( - "{0}.{1}".format(self.__SUMM_PART_PFX, - locale), must_exist=True) - if part is None: - # Data not available for this - # locale. 
- continue - parts.append(part) - - def merge_entry(src, dest): - for k, v in six.iteritems(src): - if k == "actions": - dest.setdefault(k, []) - dest[k] += v - elif k != "version": - dest[k] = v - - for ver, entries in base.entries_by_version(name, pubs=pubs): - nentries = [] - for f, bentry in entries: - mdata = {} - merge_entry(bentry, mdata) - for part in parts: - entry = part.get_entry(f) - if entry is None: - # Part doesn't have this FMRI, - # so skip it. - continue - merge_entry(entry, mdata) - nentries.append((f, mdata)) - yield ver, nentries - - def entry_actions(self, info_needed, excludes=EmptyI, cb=None, - last=False, locales=None, ordered=False, pubs=EmptyI): - """A generator function that produces tuples of the format - ((pub, stem, version), entry, actions) as it iterates over - the contents of the catalog (where 'actions' is a generator - that returns the Actions corresponding to the requested - information). - - If the catalog doesn't contain any action data for the package - entry, and manifest_cb was defined at Catalog creation time, - the action data will be lazy-loaded by the actions generator; - otherwise it will return an empty iterator. This means that - the manifest_cb will be executed even for packages that don't - actually have any actions corresponding to info_needed. For - example, if a package doesn't have any dependencies, the - manifest_cb will still be executed. This was considered a - reasonable compromise as packages are generally expected to - have DEPENDENCY and SUMMARY information. - - 'excludes' is a list of variants which will be used to determine - what should be allowed by the actions generator in addition to - what is specified by 'info_needed'. - - 'cb' is an optional callback function that will be executed for - each package before its action data is retrieved. It must accept - two arguments: 'pkg' and 'entry'. 'pkg' is an FMRI object and - 'entry' is the dictionary structure of the catalog entry for the - package. If the callback returns False, then the entry will not - be included in the results. This can significantly improve - performance by avoiding action data retrieval for results that - will not be used. - - 'info_needed' is a set of one or more catalog constants - indicating the types of catalog data that will be returned - in 'actions' in addition to the above: - - DEPENDENCY - Depend and set Actions for package obsoletion, - renaming, variants. - - SUMMARY - Any remaining set Actions not listed above, such - as pkg.summary, pkg.description, etc. - - 'last' is a boolean value that indicates only the last entry - for each package on a per-publisher basis should be returned. - As long as the catalog has been saved since the last modifying - operation, or finalize() has has been called, this will also be - the newest version of the package. - - 'locales' is an optional set of locale names for which Actions - should be returned. The default is set(('C',)) if not provided. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. - - 'pfmri' is an optional FMRI to limit the returned results to. 
- - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - for r, entry in self.__entries(cb=cb, info_needed=info_needed, - locales=locales, last_version=last, ordered=ordered, - pubs=pubs, tuples=True): - try: - yield (r, entry, - self.__gen_actions(r, entry["actions"], - excludes)) - except KeyError: - if self.__manifest_cb: - pub, stem, ver = r - f = fmri.PkgFmri(name=stem, publisher=pub, - version=ver) - yield (r, entry, - self.__gen_lazy_actions(f, info_needed, - locales, excludes)) - else: - yield r, entry, EmptyI - - @property - def exists(self): - """A boolean value indicating whether the Catalog exists - on-disk.""" - - # If the Catalog attrs file exists on-disk, - # then the catalog does. - attrs = self._attrs - return attrs.exists - - def finalize(self, pfmris=None, pubs=None): - """This function re-sorts the contents of the Catalog so that - version entries are in the correct order and sets the package - counts for the Catalog based on its current contents. - - 'pfmris' is an optional set of FMRIs that indicate what package - entries have been changed since this function was last called. - It is used to optimize the finalization process. - - 'pubs' is an optional set of publisher prefixes that indicate - what publisher has had package entries changed. It is used - to optimize the finalization process. This option has no effect - if 'pfmris' is also provided.""" - - return self.__finalize(pfmris=pfmris, pubs=pubs) - - def fmris(self, last=False, objects=True, ordered=False, pubs=EmptyI): - """A generator function that produces FMRIs as it iterates - over the contents of the catalog. - - 'last' is a boolean value that indicates only the last FMRI - for each package on a per-publisher basis should be returned. - As long as the catalog has been saved since the last modifying - operation, or finalize() has has been called, this will also be - the newest version of the package. - - 'objects' is an optional boolean value indicating whether - FMRIs should be returned as FMRI objects or as strings. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - # Catalog contains nothing. - - # This construction is necessary to get python to - # return no results properly to callers expecting - # a generator function. - return iter(()) - return base.fmris(last=last, objects=objects, ordered=ordered, - pubs=pubs) - - def fmris_by_version(self, name, pubs=EmptyI): - """A generator function that produces tuples of (version, - fmris), where fmris is a of the fmris related to the - version, for the given package name. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - # Catalog contains nothing. - - # This construction is necessary to get python to - # return no results properly to callers expecting - # a generator function. - return iter(()) - return base.fmris_by_version(name, pubs=pubs) - - def get_entry(self, pfmri, info_needed=EmptyI, locales=None): - """Returns a dict containing the metadata for the specified - FMRI containing the requested information. 
If the specified - FMRI does not exist in the catalog, a value of None will be - returned. - - 'metadata' always contains the following information at a - minimum: - - BASE - 'metadata' will be populated with Manifest - signature data, if available, using key-value - pairs of the form 'signature-': value. - - 'info_needed' is an optional list of one or more catalog - constants indicating the types of catalog data that will - be returned in 'metadata' in addition to the above: - - DEPENDENCY - 'metadata' will contain depend and set Actions - for package obsoletion, renaming, variants, - and facets stored in a list under the - key 'actions'. - - SUMMARY - 'metadata' will contain any remaining Actions - not listed above, such as pkg.summary, - pkg.description, etc. in a list under the key - 'actions'. - - 'locales' is an optional set of locale names for which Actions - should be returned. The default is set(('C',)) if not provided. - """ - - def merge_entry(src, dest): - for k, v in six.iteritems(src): - if k == "actions": - dest.setdefault(k, []) - dest[k] += v - elif k != "version": - dest[k] = v - - parts = [] - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - return - - if not locales: - locales = set(("C",)) - else: - locales = set(locales) - - # Always attempt to retrieve the BASE entry as FMRIs - # must be present in the BASE catalog part. + # The provided update is a full update. + apply_full(name) + + # Next, verify that all of the updated parts have a + # signature that matches the new catalog.attrs file. + new_attrs = CatalogAttrs(meta_root=path) + new_sigs = {} + for name, mdata in six.iteritems(new_attrs.parts): + new_sigs[name] = {} + for key in mdata: + if not key.startswith("signature-"): + continue + sig = key.split("signature-")[1] + new_sigs[name][sig] = mdata[key] + + # This must be done to ensure that the catalog + # signature matches that of the source. + self.batch_mode = old_batch_mode + self.finalize() + + for name, part in six.iteritems(self.__parts): + part.validate(signatures=new_sigs[name]) + + # Finally, save the catalog, and then copy the new + # catalog attributes file into place and reload it. + self.__save() + apply_full(self._attrs.name) + + self._attrs = CatalogAttrs(meta_root=self.meta_root) + self.__set_perms() + finally: + self.batch_mode = old_batch_mode + self.__unlock_catalog() + + def categories(self, excludes=EmptyI, pubs=EmptyI): + """Returns a set of tuples of the form (scheme, category) + containing the names of all categories in use by the last + version of each unique package in the catalog on a per- + publisher basis. + + 'excludes' is a list of variants which will be used to + determine what category actions will be checked. 
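# Usage sketch (illustrative only; assumes 'cat' is a loaded Catalog
# instance, which is not shown in this hunk). categories() returns a set of
# (scheme, category) tuples drawn from the newest version of each package:
#
#     for scheme, category in sorted(cat.categories()):
#         print(scheme, category)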
+ + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + acts = self.__actions( + [self.SUMMARY], excludes=excludes, last_version=True, pubs=pubs + ) + return set( + ( + sc + for f, acts in acts + for a in acts + if a.has_category_info() + for sc in a.parse_category_info() + ) + ) + + @property + def created(self): + """A UTC datetime object indicating the time the catalog was + created.""" + return self._attrs.created + + def destroy(self): + """Removes any on-disk files that exist for the catalog and + discards all content.""" + + for name in self._attrs.parts: + part = self.get_part(name) + part.destroy() + + for name in self._attrs.updates: + ulog = self.__get_update(name, cache=False) + ulog.destroy() + + self._attrs = CatalogAttrs(meta_root=self.meta_root, sign=self.__sign) + self.__parts = {} + self.__updates = {} + self._attrs.destroy() + + if not self.meta_root or not os.path.exists(self.meta_root): + return + + # Finally, ensure that if there are any leftover files from + # an interrupted destroy in the past that they are removed + # as well. + for fname in os.listdir(self.meta_root): + if not fname.startswith("catalog.") and not fname.startswith( + "update." + ): + continue + + pname = os.path.join(self.meta_root, fname) + if not os.path.isfile(pname): + continue + + try: + portable.remove(pname) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + raise + + def entries( + self, + info_needed=EmptyI, + last=False, + locales=None, + ordered=False, + pubs=EmptyI, + ): + """A generator function that produces tuples of the format + (fmri, metadata) as it iterates over the contents of the + catalog (where 'metadata' is a dict containing the requested + information). + + 'metadata' always contains the following information at a + minimum: + + BASE + 'metadata' will be populated with Manifest + signature data, if available, using key-value + pairs of the form 'signature-': value. + + 'info_needed' is an optional list of one or more catalog + constants indicating the types of catalog data that will + be returned in 'metadata' in addition to the above: + + DEPENDENCY + 'metadata' will contain depend and set Actions + for package obsoletion, renaming, variants, + and facets stored in a list under the + key 'actions'. + + SUMMARY + 'metadata' will contain any remaining Actions + not listed above, such as pkg.summary, + pkg.description, etc. in a list under the key + 'actions'. + + 'last' is a boolean value that indicates only the last entry + for each package on a per-publisher basis should be returned. + As long as the catalog has been saved since the last modifying + operation, or finalize() has has been called, this will also be + the newest version of the package. + + 'locales' is an optional set of locale names for which Actions + should be returned. The default is set(('C',)) if not provided. + Note that unlike actions(), catalog entries will not lazy-load + action data if it is missing from the catalog. + + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. 
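# Usage sketch (illustrative only; 'cat' is an assumed, loaded Catalog
# instance). entries() yields (FMRI, metadata) pairs; requesting the SUMMARY
# constant also places the matching set actions under metadata["actions"]:
#
#     for f, mdata in cat.entries(info_needed=[cat.SUMMARY], last=True,
#             ordered=True):
#         print(f, len(mdata.get("actions", [])))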
+ + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + return self.__entries( + info_needed=info_needed, + last_version=last, + locales=locales, + ordered=ordered, + pubs=pubs, + ) + + def entries_by_version( + self, name, info_needed=EmptyI, locales=None, pubs=EmptyI + ): + """A generator function that produces tuples of the format + (version, entries) as it iterates over the contents of the + the catalog, where entries is a list of tuples of the format + (fmri, metadata) and metadata is a dict containing the + requested information. + + 'metadata' always contains the following information at a + minimum: + + BASE + 'metadata' will be populated with Manifest + signature data, if available, using key-value + pairs of the form 'signature-': value. + + 'info_needed' is an optional list of one or more catalog + constants indicating the types of catalog data that will + be returned in 'metadata' in addition to the above: + + DEPENDENCY + 'metadata' will contain depend and set Actions + for package obsoletion, renaming, variants, + and facets stored in a list under the + key 'actions'. + + SUMMARY + 'metadata' will contain any remaining Actions + not listed above, such as pkg.summary, + pkg.description, etc. in a list under the key + 'actions'. + + 'locales' is an optional set of locale names for which Actions + should be returned. The default is set(('C',)) if not provided. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + # Catalog contains nothing. + return + + if not locales: + locales = set(("C",)) + else: + locales = set(locales) + + parts = [] + if self.DEPENDENCY in info_needed: + part = self.get_part(self.__DEPS_PART, must_exist=True) + if part is not None: + parts.append(part) + + if self.SUMMARY in info_needed: + for locale in locales: + part = self.get_part( + "{0}.{1}".format(self.__SUMM_PART_PFX, locale), + must_exist=True, + ) + if part is None: + # Data not available for this + # locale. + continue + parts.append(part) + + def merge_entry(src, dest): + for k, v in six.iteritems(src): + if k == "actions": + dest.setdefault(k, []) + dest[k] += v + elif k != "version": + dest[k] = v + + for ver, entries in base.entries_by_version(name, pubs=pubs): + nentries = [] + for f, bentry in entries: mdata = {} - bentry = base.get_entry(pfmri) - if bentry is None: - return merge_entry(bentry, mdata) - - if self.DEPENDENCY in info_needed: - part = self.get_part(self.__DEPS_PART, - must_exist=True) - if part is not None: - parts.append(part) - - if self.SUMMARY in info_needed: - for locale in locales: - part = self.get_part( - "{0}.{1}".format(self.__SUMM_PART_PFX, - locale), must_exist=True) - if part is None: - # Data not available for this - # locale. - continue - parts.append(part) - for part in parts: - entry = part.get_entry(pfmri) - if entry is None: - # Part doesn't have this FMRI, - # so skip it. - continue - merge_entry(entry, mdata) - return mdata - - def get_entry_actions(self, pfmri, info_needed, excludes=EmptyI, - locales=None): - """A generator function that produces Actions as it iterates - over the catalog entry of the specified FMRI corresponding to - the requested information). If the catalog doesn't contain - any action data for the package entry, and manifest_cb was - defined at Catalog creation time, the action data will be - lazy-loaded by the actions generator; otherwise it will - return an empty iterator. 
- - 'excludes' is a list of variants which will be used to determine - what should be allowed by the actions generator in addition to - what is specified by 'info_needed'. If not provided, only - 'info_needed' will determine what actions are returned. - - 'info_needed' is a set of one or more catalog constants - indicating the types of catalog data that will be returned - in 'actions' in addition to the above: - - DEPENDENCY - Depend and set Actions for package obsoletion, - renaming, variants. - - SUMMARY - Any remaining set Actions not listed above, such - as pkg.summary, pkg.description, etc. - - 'locales' is an optional set of locale names for which Actions - should be returned. The default is set(('C',)) if not provided. - """ - - assert info_needed - if not locales: - locales = set(("C",)) + entry = part.get_entry(f) + if entry is None: + # Part doesn't have this FMRI, + # so skip it. + continue + merge_entry(entry, mdata) + nentries.append((f, mdata)) + yield ver, nentries + + def entry_actions( + self, + info_needed, + excludes=EmptyI, + cb=None, + last=False, + locales=None, + ordered=False, + pubs=EmptyI, + ): + """A generator function that produces tuples of the format + ((pub, stem, version), entry, actions) as it iterates over + the contents of the catalog (where 'actions' is a generator + that returns the Actions corresponding to the requested + information). + + If the catalog doesn't contain any action data for the package + entry, and manifest_cb was defined at Catalog creation time, + the action data will be lazy-loaded by the actions generator; + otherwise it will return an empty iterator. This means that + the manifest_cb will be executed even for packages that don't + actually have any actions corresponding to info_needed. For + example, if a package doesn't have any dependencies, the + manifest_cb will still be executed. This was considered a + reasonable compromise as packages are generally expected to + have DEPENDENCY and SUMMARY information. + + 'excludes' is a list of variants which will be used to determine + what should be allowed by the actions generator in addition to + what is specified by 'info_needed'. + + 'cb' is an optional callback function that will be executed for + each package before its action data is retrieved. It must accept + two arguments: 'pkg' and 'entry'. 'pkg' is an FMRI object and + 'entry' is the dictionary structure of the catalog entry for the + package. If the callback returns False, then the entry will not + be included in the results. This can significantly improve + performance by avoiding action data retrieval for results that + will not be used. + + 'info_needed' is a set of one or more catalog constants + indicating the types of catalog data that will be returned + in 'actions' in addition to the above: + + DEPENDENCY + Depend and set Actions for package obsoletion, + renaming, variants. + + SUMMARY + Any remaining set Actions not listed above, such + as pkg.summary, pkg.description, etc. + + 'last' is a boolean value that indicates only the last entry + for each package on a per-publisher basis should be returned. + As long as the catalog has been saved since the last modifying + operation, or finalize() has has been called, this will also be + the newest version of the package. + + 'locales' is an optional set of locale names for which Actions + should be returned. The default is set(('C',)) if not provided. 
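# Usage sketch (illustrative only; 'cat' is an assumed, loaded Catalog and
# 'keep_entry' is a hypothetical filter callback):
#
#     def keep_entry(pkg, entry):
#         # 'pkg' is an FMRI, 'entry' the catalog dict; return False to skip.
#         return True
#
#     needed = frozenset([cat.DEPENDENCY, cat.SUMMARY])
#     for (pub, stem, ver), entry, actions in cat.entry_actions(
#             needed, cb=keep_entry, last=True, ordered=True):
#         for a in actions:
#             print(pub, stem, ver, a.name)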
+ + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. + + 'pfmri' is an optional FMRI to limit the returned results to. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + for r, entry in self.__entries( + cb=cb, + info_needed=info_needed, + locales=locales, + last_version=last, + ordered=ordered, + pubs=pubs, + tuples=True, + ): + try: + yield ( + r, + entry, + self.__gen_actions(r, entry["actions"], excludes), + ) + except KeyError: + if self.__manifest_cb: + pub, stem, ver = r + f = fmri.PkgFmri(name=stem, publisher=pub, version=ver) + yield ( + r, + entry, + self.__gen_lazy_actions( + f, info_needed, locales, excludes + ), + ) else: - locales = set(locales) + yield r, entry, EmptyI + + @property + def exists(self): + """A boolean value indicating whether the Catalog exists + on-disk.""" + + # If the Catalog attrs file exists on-disk, + # then the catalog does. + attrs = self._attrs + return attrs.exists + + def finalize(self, pfmris=None, pubs=None): + """This function re-sorts the contents of the Catalog so that + version entries are in the correct order and sets the package + counts for the Catalog based on its current contents. + + 'pfmris' is an optional set of FMRIs that indicate what package + entries have been changed since this function was last called. + It is used to optimize the finalization process. + + 'pubs' is an optional set of publisher prefixes that indicate + what publisher has had package entries changed. It is used + to optimize the finalization process. This option has no effect + if 'pfmris' is also provided.""" + + return self.__finalize(pfmris=pfmris, pubs=pubs) + + def fmris(self, last=False, objects=True, ordered=False, pubs=EmptyI): + """A generator function that produces FMRIs as it iterates + over the contents of the catalog. + + 'last' is a boolean value that indicates only the last FMRI + for each package on a per-publisher basis should be returned. + As long as the catalog has been saved since the last modifying + operation, or finalize() has has been called, this will also be + the newest version of the package. + + 'objects' is an optional boolean value indicating whether + FMRIs should be returned as FMRI objects or as strings. + + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + # Catalog contains nothing. + + # This construction is necessary to get python to + # return no results properly to callers expecting + # a generator function. + return iter(()) + return base.fmris( + last=last, objects=objects, ordered=ordered, pubs=pubs + ) + + def fmris_by_version(self, name, pubs=EmptyI): + """A generator function that produces tuples of (version, + fmris), where fmris is a of the fmris related to the + version, for the given package name. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + # Catalog contains nothing. 
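# Usage sketch (illustrative only; 'cat' is an assumed, loaded Catalog and
# "web/server/nginx" a hypothetical package stem):
#
#     for f in cat.fmris(last=True, ordered=True):
#         print(f)
#     for ver, fmris in cat.fmris_by_version("web/server/nginx"):
#         print(ver, [str(f) for f in fmris])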
+ + # This construction is necessary to get python to + # return no results properly to callers expecting + # a generator function. + return iter(()) + return base.fmris_by_version(name, pubs=pubs) + + def get_entry(self, pfmri, info_needed=EmptyI, locales=None): + """Returns a dict containing the metadata for the specified + FMRI containing the requested information. If the specified + FMRI does not exist in the catalog, a value of None will be + returned. + + 'metadata' always contains the following information at a + minimum: + + BASE + 'metadata' will be populated with Manifest + signature data, if available, using key-value + pairs of the form 'signature-': value. + + 'info_needed' is an optional list of one or more catalog + constants indicating the types of catalog data that will + be returned in 'metadata' in addition to the above: + + DEPENDENCY + 'metadata' will contain depend and set Actions + for package obsoletion, renaming, variants, + and facets stored in a list under the + key 'actions'. + + SUMMARY + 'metadata' will contain any remaining Actions + not listed above, such as pkg.summary, + pkg.description, etc. in a list under the key + 'actions'. + + 'locales' is an optional set of locale names for which Actions + should be returned. The default is set(('C',)) if not provided. + """ + + def merge_entry(src, dest): + for k, v in six.iteritems(src): + if k == "actions": + dest.setdefault(k, []) + dest[k] += v + elif k != "version": + dest[k] = v + + parts = [] + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + return + + if not locales: + locales = set(("C",)) + else: + locales = set(locales) + + # Always attempt to retrieve the BASE entry as FMRIs + # must be present in the BASE catalog part. + mdata = {} + bentry = base.get_entry(pfmri) + if bentry is None: + return + merge_entry(bentry, mdata) + + if self.DEPENDENCY in info_needed: + part = self.get_part(self.__DEPS_PART, must_exist=True) + if part is not None: + parts.append(part) + + if self.SUMMARY in info_needed: + for locale in locales: + part = self.get_part( + "{0}.{1}".format(self.__SUMM_PART_PFX, locale), + must_exist=True, + ) + if part is None: + # Data not available for this + # locale. + continue + parts.append(part) + + for part in parts: + entry = part.get_entry(pfmri) + if entry is None: + # Part doesn't have this FMRI, + # so skip it. + continue + merge_entry(entry, mdata) + return mdata + + def get_entry_actions( + self, pfmri, info_needed, excludes=EmptyI, locales=None + ): + """A generator function that produces Actions as it iterates + over the catalog entry of the specified FMRI corresponding to + the requested information). If the catalog doesn't contain + any action data for the package entry, and manifest_cb was + defined at Catalog creation time, the action data will be + lazy-loaded by the actions generator; otherwise it will + return an empty iterator. + + 'excludes' is a list of variants which will be used to determine + what should be allowed by the actions generator in addition to + what is specified by 'info_needed'. If not provided, only + 'info_needed' will determine what actions are returned. + + 'info_needed' is a set of one or more catalog constants + indicating the types of catalog data that will be returned + in 'actions' in addition to the above: + + DEPENDENCY + Depend and set Actions for package obsoletion, + renaming, variants. + + SUMMARY + Any remaining set Actions not listed above, such + as pkg.summary, pkg.description, etc. 
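# Usage sketch (illustrative only; 'cat' is an assumed, loaded Catalog and
# 'pfmri' an assumed FMRI object already known to the caller):
#
#     mdata = cat.get_entry(pfmri, info_needed=[cat.SUMMARY])
#     if mdata is not None:
#         for a in cat.get_entry_actions(pfmri, frozenset([cat.SUMMARY])):
#             if a.name == "set" and a.attrs["name"] == "pkg.summary":
#                 print(a.attrs["value"])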
+ + 'locales' is an optional set of locale names for which Actions + should be returned. The default is set(('C',)) if not provided. + """ + + assert info_needed + if not locales: + locales = set(("C",)) + else: + locales = set(locales) - entry = self.get_entry(pfmri, info_needed=info_needed, - locales=locales) - if entry is None: - raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + entry = self.get_entry(pfmri, info_needed=info_needed, locales=locales) + if entry is None: + raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) - try: - return self.__gen_actions(pfmri, entry["actions"], - excludes) - except KeyError: - if self.__manifest_cb: - return self.__gen_lazy_actions(pfmri, - info_needed, locales, excludes) - else: - return EmptyI + try: + return self.__gen_actions(pfmri, entry["actions"], excludes) + except KeyError: + if self.__manifest_cb: + return self.__gen_lazy_actions( + pfmri, info_needed, locales, excludes + ) + else: + return EmptyI - def get_entry_all_variants(self, pfmri): - """A generator function that yields tuples of the format - (var_name, variants); where var_name is the name of the - variant and variants is a list of the variants for that - name.""" + def get_entry_all_variants(self, pfmri): + """A generator function that yields tuples of the format + (var_name, variants); where var_name is the name of the + variant and variants is a list of the variants for that + name.""" - info_needed = [self.DEPENDENCY] - entry = self.get_entry(pfmri, info_needed=info_needed) - if entry is None: - raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + info_needed = [self.DEPENDENCY] + entry = self.get_entry(pfmri, info_needed=info_needed) + if entry is None: + raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) - try: - actions = self.__gen_actions(pfmri, entry["actions"]) - except KeyError: - if self.__manifest_cb: - actions = self.__gen_lazy_actions(pfmri, - info_needed) - else: - return + try: + actions = self.__gen_actions(pfmri, entry["actions"]) + except KeyError: + if self.__manifest_cb: + actions = self.__gen_lazy_actions(pfmri, info_needed) + else: + return + for a in actions: + if a.name != "set": + continue + + attr_name = a.attrs["name"] + if not attr_name.startswith("variant"): + continue + yield attr_name, a.attrs["value"] + + def get_entry_signatures(self, pfmri): + """A generator function that yields tuples of the form (sig, + value) where 'sig' is the name of the signature, and 'value' is + the raw catalog value for the signature. Please note that the + data type of 'value' is dependent on the signature, so it may + be a string, list, dict, etc.""" + + entry = self.get_entry(pfmri) + if entry is None: + raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + return ( + (k.split("signature-")[1], v) + for k, v in six.iteritems(entry) + if k.startswith("signature-") + ) + + def get_entry_variants(self, pfmri, name): + """A generator function that returns the variants for the + specified variant name. If no variants exist for the + specified name, None will be returned.""" + + for var_name, values in self.get_entry_all_variants(pfmri): + if var_name == name: + # A package can only have one set of values + # for a single variant name, so return it. 
+ return values + return None + + def gen_packages( + self, + collect_attrs=False, + matched=None, + patterns=EmptyI, + pubs=EmptyI, + unmatched=None, + return_fmris=False, + ): + """A generator function that produces tuples of the form: + + ( + ( + pub, - (string) the publisher of the package + stem, - (string) the name of the package + version - (string) the version of the package + ), + states, - (list) states + attributes - (dict) package attributes + ) + + Results are always sorted by stem, publisher, and then in + descending version order. + + 'collect_attrs' is an optional boolean that indicates whether + all package attributes should be collected and returned in the + fifth element of the return tuple. If False, that element will + be an empty dictionary. + + 'matched' is an optional set to add matched patterns to. + + 'patterns' is an optional list of FMRI wildcard strings to + filter results by. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to. + + 'unmatched' is an optional set to add unmatched patterns to. + + 'return_fmris' is an optional boolean value that indicates that + an FMRI object should be returned in place of the (pub, stem, + ver) tuple that is normally returned.""" + + # Each pattern in patterns can be a partial or full FMRI, so + # extract the individual components for use in filtering. + newest = False + illegals = [] + pat_tuples = {} + latest_pats = set() + seen = set() + npatterns = set() + for pat, error, pfmri, matcher in self.__parse_fmri_patterns(patterns): + if error: + illegals.append(error) + continue + + # Duplicate patterns are ignored. + sfmri = str(pfmri) + if sfmri in seen: + # A different form of the same pattern + # was specified already; ignore this + # one (e.g. pkg:/network/ping, + # /network/ping). + continue + + # Track used patterns. + seen.add(sfmri) + npatterns.add(pat) + + if getattr(pfmri.version, "match_latest", None): + latest_pats.add(pat) + pat_tuples[pat] = (pfmri.tuple(), matcher) + + patterns = npatterns + del npatterns, seen + + if illegals: + raise api_errors.PackageMatchErrors(illegal=illegals) + + # Keep track of listed stems for all other packages on a + # per-publisher basis. + nlist = collections.defaultdict(int) + + # Track matching patterns. + matched_pats = set() + pkg_matching_pats = None + + # Need dependency and summary actions. + cat_info = frozenset([self.DEPENDENCY, self.SUMMARY]) + + for t, entry, actions in self.entry_actions( + cat_info, ordered=True, pubs=pubs + ): + pub, stem, ver = t + + omit_ver = False + omit_package = None + + pkg_stem = "!".join((pub, stem)) + if newest and pkg_stem in nlist: + # A newer version has already been listed, so + # any additional entries need to be marked for + # omission before continuing. + omit_package = True + else: + nlist[pkg_stem] += 1 + + if matched is not None or unmatched is not None: + pkg_matching_pats = set() + if not omit_package: + ever = None + for pat in patterns: + (pat_pub, pat_stem, pat_ver), matcher = pat_tuples[pat] + + if pat_pub is not None and pub != pat_pub: + # Publisher doesn't match. + if omit_package is None: + omit_package = True + continue + + if matcher == fmri.exact_name_match: + if pat_stem != stem: + # Stem doesn't match. + if omit_package is None: + omit_package = True + continue + elif matcher == fmri.fmri_match: + if not ("/" + stem).endswith("/" + pat_stem): + # Stem doesn't match. 
+ if omit_package is None: + omit_package = True + continue + elif matcher == fmri.glob_match: + if not fnmatch.fnmatchcase(stem, pat_stem): + # Stem doesn't match. + if omit_package is None: + omit_package = True + continue + + if pat_ver is not None: + if ever is None: + # Avoid constructing a + # version object more + # than once for each + # entry. + ever = pkg.version.Version(ver) + if not ever.is_successor( + pat_ver, pkg.version.CONSTRAINT_AUTO + ): + if omit_package is None: + omit_package = True + omit_ver = True + continue + + if pat in latest_pats and nlist[pkg_stem] > 1: + # Package allowed by pattern, + # but isn't the "latest" + # version. + if omit_package is None: + omit_package = True + omit_ver = True + continue + + # If this entry matched at least one + # pattern, then ensure it is returned. + omit_package = False + if matched is None and unmatched is None: + # It's faster to stop as soon + # as a match is found. + break + + # If caller has requested other match + # cases be returned, then all patterns + # must be tested for every entry. This + # is slower, so only done if necessary. + pkg_matching_pats.add(pat) + + if omit_package: + # Package didn't match critera; skip it. + continue + + # Collect attribute data if requested. + summ = None + + omit_var = False + states = set() + if collect_attrs: + # use OrderedDict to get a deterministic output + attrs = collections.defaultdict(lambda: OrderedDict([])) + else: + attrs = EmptyDict + + try: for a in actions: - if a.name != "set": - continue - - attr_name = a.attrs["name"] - if not attr_name.startswith("variant"): - continue - yield attr_name, a.attrs["value"] - - def get_entry_signatures(self, pfmri): - """A generator function that yields tuples of the form (sig, - value) where 'sig' is the name of the signature, and 'value' is - the raw catalog value for the signature. Please note that the - data type of 'value' is dependent on the signature, so it may - be a string, list, dict, etc.""" - - entry = self.get_entry(pfmri) - if entry is None: - raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) - return ( - (k.split("signature-")[1], v) - for k, v in six.iteritems(entry) - if k.startswith("signature-") - ) - - def get_entry_variants(self, pfmri, name): - """A generator function that returns the variants for the - specified variant name. If no variants exist for the - specified name, None will be returned.""" + if a.name != "set": + continue + + atname = a.attrs["name"] + atvalue = a.attrs["value"] + if collect_attrs: + atvlist = a.attrlist("value") + # mods = frozenset( + # (k1, frozenset([k1_1, k1_2])) + # (k2, frozenset([k2_1, k2_2])) + # ) + # will later be converted by the + # caller into a dict like: + # { + # k1: frozenset([k1_1, k1_2]), + # k2: frozenset([k2_1, k2_2]) + # } + mods = frozenset( + (k, frozenset(a.attrlist(k))) + for k in six.iterkeys(a.attrs) + if k not in ("name", "value") + ) + if mods not in attrs[atname]: + attrs[atname][mods] = atvlist + else: + attrs[atname][mods].extend(atvlist) - for var_name, values in self.get_entry_all_variants(pfmri): - if var_name == name: - # A package can only have one set of values - # for a single variant name, so return it. 
- return values - return None + if atname == "pkg.summary": + summ = atvalue + continue - def gen_packages(self, collect_attrs=False, matched=None, - patterns=EmptyI, pubs=EmptyI, unmatched=None, return_fmris=False): - """A generator function that produces tuples of the form: + if atname == "description": + if summ is not None: + continue - ( - ( - pub, - (string) the publisher of the package - stem, - (string) the name of the package - version - (string) the version of the package - ), - states, - (list) states - attributes - (dict) package attributes - ) - - Results are always sorted by stem, publisher, and then in - descending version order. - - 'collect_attrs' is an optional boolean that indicates whether - all package attributes should be collected and returned in the - fifth element of the return tuple. If False, that element will - be an empty dictionary. - - 'matched' is an optional set to add matched patterns to. - - 'patterns' is an optional list of FMRI wildcard strings to - filter results by. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to. - - 'unmatched' is an optional set to add unmatched patterns to. - - 'return_fmris' is an optional boolean value that indicates that - an FMRI object should be returned in place of the (pub, stem, - ver) tuple that is normally returned.""" - - # Each pattern in patterns can be a partial or full FMRI, so - # extract the individual components for use in filtering. - newest = False - illegals = [] - pat_tuples = {} - latest_pats = set() - seen = set() - npatterns = set() - for pat, error, pfmri, matcher in self.__parse_fmri_patterns( - patterns): - if error: - illegals.append(error) - continue - - # Duplicate patterns are ignored. - sfmri = str(pfmri) - if sfmri in seen: - # A different form of the same pattern - # was specified already; ignore this - # one (e.g. pkg:/network/ping, - # /network/ping). - continue - - # Track used patterns. - seen.add(sfmri) - npatterns.add(pat) - - if getattr(pfmri.version, "match_latest", None): - latest_pats.add(pat) - pat_tuples[pat] = (pfmri.tuple(), matcher) - - patterns = npatterns - del npatterns, seen - - if illegals: - raise api_errors.PackageMatchErrors(illegal=illegals) - - # Keep track of listed stems for all other packages on a - # per-publisher basis. - nlist = collections.defaultdict(int) - - # Track matching patterns. - matched_pats = set() - pkg_matching_pats = None - - # Need dependency and summary actions. - cat_info = frozenset([self.DEPENDENCY, self.SUMMARY]) - - for t, entry, actions in self.entry_actions(cat_info, - ordered=True, pubs=pubs): - pub, stem, ver = t - - omit_ver = False - omit_package = None - - pkg_stem = "!".join((pub, stem)) - if newest and pkg_stem in nlist: - # A newer version has already been listed, so - # any additional entries need to be marked for - # omission before continuing. - omit_package = True - else: - nlist[pkg_stem] += 1 - - if matched is not None or unmatched is not None: - pkg_matching_pats = set() - if not omit_package: - ever = None - for pat in patterns: - (pat_pub, pat_stem, pat_ver), matcher = \ - pat_tuples[pat] - - if pat_pub is not None and \ - pub != pat_pub: - # Publisher doesn't match. - if omit_package is None: - omit_package = True - continue - - if matcher == fmri.exact_name_match: - if pat_stem != stem: - # Stem doesn't match. - if omit_package is None: - omit_package = \ - True - continue - elif matcher == fmri.fmri_match: - if not ("/" + stem).endswith( - "/" + pat_stem): - # Stem doesn't match. 
- if omit_package is None: - omit_package = \ - True - continue - elif matcher == fmri.glob_match: - if not fnmatch.fnmatchcase(stem, - pat_stem): - # Stem doesn't match. - if omit_package is None: - omit_package = \ - True - continue - - if pat_ver is not None: - if ever is None: - # Avoid constructing a - # version object more - # than once for each - # entry. - ever = pkg.version.Version(ver) - if not ever.is_successor(pat_ver, - pkg.version.CONSTRAINT_AUTO): - if omit_package is None: - omit_package = \ - True - omit_ver = True - continue - - if pat in latest_pats and \ - nlist[pkg_stem] > 1: - # Package allowed by pattern, - # but isn't the "latest" - # version. - if omit_package is None: - omit_package = True - omit_ver = True - continue - - # If this entry matched at least one - # pattern, then ensure it is returned. - omit_package = False - if (matched is None and - unmatched is None): - # It's faster to stop as soon - # as a match is found. - break - - # If caller has requested other match - # cases be returned, then all patterns - # must be tested for every entry. This - # is slower, so only done if necessary. - pkg_matching_pats.add(pat) - - if omit_package: - # Package didn't match critera; skip it. - continue - - # Collect attribute data if requested. - summ = None - - omit_var = False - states = set() + # Historical summary field. + summ = atvalue if collect_attrs: - # use OrderedDict to get a deterministic output - attrs = collections.defaultdict( - lambda: OrderedDict([])) - else: - attrs = EmptyDict - - try: - for a in actions: - if a.name != "set": - continue - - atname = a.attrs["name"] - atvalue = a.attrs["value"] - if collect_attrs: - atvlist = a.attrlist("value") - # mods = frozenset( - # (k1, frozenset([k1_1, k1_2])) - # (k2, frozenset([k2_1, k2_2])) - # ) - # will later be converted by the - # caller into a dict like: - # { - # k1: frozenset([k1_1, k1_2]), - # k2: frozenset([k2_1, k2_2]) - # } - mods = frozenset( - (k, frozenset(a.attrlist(k))) - for k in six.iterkeys(a.attrs) - if k not in ("name", "value") - ) - if mods not in attrs[atname]: - attrs[atname][mods] = atvlist - else: - attrs[atname][mods].extend( - atvlist) - - if atname == "pkg.summary": - summ = atvalue - continue - - if atname == "description": - if summ is not None: - continue - - # Historical summary field. - summ = atvalue - if collect_attrs: - if mods not in \ - attrs["pkg.summary"]: - attrs["pkg.summary"]\ - [mods] = atvlist - else: - attrs["pkg.summary"]\ - [mods].extend( - atvlist) - continue - - if atname == "pkg.renamed": - if atvalue == "true": - states.add( - pkgdefs.PKG_STATE_RENAMED) - continue - if atname == "pkg.obsolete": - if atvalue == "true": - states.add( - pkgdefs.PKG_STATE_OBSOLETE) - continue - if atname == "pkg.legacy": - if atvalue == "true": - states.add( - pkgdefs.PKG_STATE_LEGACY) - except api_errors.InvalidPackageErrors: - # Ignore errors for packages that have invalid - # or unsupported metadata. - states.add(pkgdefs.PKG_STATE_UNSUPPORTED) - - if omit_package: - # Package didn't match criteria; skip it. - if omit_ver and nlist[pkg_stem] == 1: - del nlist[pkg_stem] - continue - - if matched is not None or unmatched is not None: - # Only after all other filtering has been - # applied are the patterns that the package - # matched considered "matching". - matched_pats.update(pkg_matching_pats) - - # Return the requested package data. 
- if return_fmris: - pfmri = fmri.PkgFmri(name=stem, publisher=pub, - version=ver) - yield (pfmri, states, attrs) - else: - yield (t, states, attrs) - - if matched is not None: - # Caller has requested that matched patterns be - # returned. - matched.update(matched_pats) - if unmatched is not None: - # Caller has requested that unmatched patterns be - # returned. - unmatched.update(set(pat_tuples.keys()) - matched_pats) - - def get_matching_fmris(self, patterns): - """Given a user-specified list of FMRI pattern strings, return - a tuple of ('matching', 'references', 'unmatched'), where - matching is a dict of matching fmris, references is a dict of - the patterns indexed by matching FMRI, and unmatched is a set of - the patterns that did not match any FMRIs respectively: - - { - pkgname: [fmri1, fmri2, ...], - pkgname: [fmri1, fmri2, ...], - ... - } - - { - fmri1: [pat1, pat2, ...], - fmri2: [pat1, pat2, ...], - ... - } + if mods not in attrs["pkg.summary"]: + attrs["pkg.summary"][mods] = atvlist + else: + attrs["pkg.summary"][mods].extend(atvlist) + continue + + if atname == "pkg.renamed": + if atvalue == "true": + states.add(pkgdefs.PKG_STATE_RENAMED) + continue + if atname == "pkg.obsolete": + if atvalue == "true": + states.add(pkgdefs.PKG_STATE_OBSOLETE) + continue + if atname == "pkg.legacy": + if atvalue == "true": + states.add(pkgdefs.PKG_STATE_LEGACY) + except api_errors.InvalidPackageErrors: + # Ignore errors for packages that have invalid + # or unsupported metadata. + states.add(pkgdefs.PKG_STATE_UNSUPPORTED) + + if omit_package: + # Package didn't match criteria; skip it. + if omit_ver and nlist[pkg_stem] == 1: + del nlist[pkg_stem] + continue + + if matched is not None or unmatched is not None: + # Only after all other filtering has been + # applied are the patterns that the package + # matched considered "matching". + matched_pats.update(pkg_matching_pats) + + # Return the requested package data. + if return_fmris: + pfmri = fmri.PkgFmri(name=stem, publisher=pub, version=ver) + yield (pfmri, states, attrs) + else: + yield (t, states, attrs) + + if matched is not None: + # Caller has requested that matched patterns be + # returned. + matched.update(matched_pats) + if unmatched is not None: + # Caller has requested that unmatched patterns be + # returned. + unmatched.update(set(pat_tuples.keys()) - matched_pats) + + def get_matching_fmris(self, patterns): + """Given a user-specified list of FMRI pattern strings, return + a tuple of ('matching', 'references', 'unmatched'), where + matching is a dict of matching fmris, references is a dict of + the patterns indexed by matching FMRI, and unmatched is a set of + the patterns that did not match any FMRIs respectively: + + { + pkgname: [fmri1, fmri2, ...], + pkgname: [fmri1, fmri2, ...], + ... + } - set(['unmatched1', 'unmatchedN']) - - 'patterns' is the list of package patterns to match. - - Constraint used is always AUTO as per expected UI behavior when - determining successor versions. - - Note that patterns starting w/ pkg:/ require an exact match; - patterns containing '*' will using fnmatch rules; the default - trailing match rules are used for remaining patterns. - - Exactly duplicated patterns are ignored. - - Routine raises PackageMatchErrors if errors occur: it is - illegal to specify multiple different patterns that match the - same package name. Only patterns that contain wildcards are - allowed to match multiple packages. 
- """ - - # problems we check for - illegals = [] - unmatched = set() - multimatch = [] - multispec = [] - pat_data = [] - wildcard_patterns = set() - - # Each pattern in patterns can be a partial or full FMRI, so - # extract the individual components for use in filtering. - latest_pats = set() - seen = set() - npatterns = set() - for pat, error, pfmri, matcher in self.__parse_fmri_patterns( - patterns): - if error: - illegals.append(error) - continue - - # Duplicate patterns are ignored. - sfmri = str(pfmri) - if sfmri in seen: - # A different form of the same pattern - # was specified already; ignore this - # one (e.g. pkg:/network/ping, - # /network/ping). - continue - - # Track used patterns. - seen.add(sfmri) - npatterns.add(pat) - if "*" in pfmri.pkg_name or "?" in pfmri.pkg_name: - wildcard_patterns.add(pat) - - if getattr(pfmri.version, "match_latest", None): - latest_pats.add(pat) - pat_data.append((pat, matcher, pfmri)) - - patterns = npatterns - del npatterns, seen - - if illegals: - raise api_errors.PackageMatchErrors(illegal=illegals) - - # Create a dictionary of patterns, with each value being a - # dictionary of pkg names & fmris that match that pattern. - ret = dict(zip(patterns, [dict() for i in patterns])) - - for name in self.names(): - for pat, matcher, pfmri in pat_data: - pub = pfmri.publisher - version = pfmri.version - if not matcher(name, pfmri.pkg_name): - continue # name doesn't match - for ver, entries in \ - self.entries_by_version(name): - if version and not ver.is_successor( - version, - pkg.version.CONSTRAINT_AUTO): - continue # version doesn't match - for f, metadata in entries: - fpub = f.publisher - if pub and pub != fpub: - # specified pubs - # conflict - continue - ret[pat].setdefault(f.pkg_name, - []).append(f) - - # Discard all but the newest version of each match. - if latest_pats: - # Rebuild ret based on latest version of every package. - latest = {} - nret = {} - for p in patterns: - if p not in latest_pats or not ret[p]: - nret[p] = ret[p] - continue - - nret[p] = {} - for pkg_name in ret[p]: - nret[p].setdefault(pkg_name, []) - for f in ret[p][pkg_name]: - nver = latest.get(f.pkg_name, - None) - if nver > f.version: - # Not the newest. - continue - if nver == f.version: - # Allow for multiple - # FMRIs of the same - # latest version. - nret[p][pkg_name].append( - f) - continue - - latest[f.pkg_name] = f.version - nret[p][pkg_name] = [f] - - # Assign new version of ret and discard latest list. - ret = nret - del latest - - # Determine match failures. - matchdict = {} - for p in patterns: - l = len(ret[p]) - if l == 0: # no matches at all - unmatched.add(p) - elif l > 1 and p not in wildcard_patterns: - # multiple matches - multimatch.append((p, [ - ret[p][n][0].get_pkg_stem() - for n in ret[p] - ])) - else: - # single match or wildcard - for k in ret[p].keys(): - # for each matching package name - matchdict.setdefault(k, []).append(p) - - if multimatch: - raise api_errors.PackageMatchErrors( - multiple_matches=multimatch) - - # Group the matching patterns by package name and allow multiple - # fmri matches. - proposed_dict = {} - for d in ret.values(): - for k, l in six.iteritems(d): - proposed_dict.setdefault(k, []).extend(l) - - # construct references so that we can know which pattern - # generated which fmris... 
- references = dict([ - (f, p) - for p in ret.keys() - for flist in ret[p].values() - for f in flist - ]) - - return proposed_dict, references, unmatched - - def get_package_counts_by_pub(self, pubs=EmptyI): - """Returns a generator of tuples of the form (pub, - package_count, package_version_count). 'pub' is the publisher - prefix, 'package_count' is the number of unique packages for the - publisher, and 'package_version_count' is the number of unique - package versions for the publisher. - """ - - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - # Catalog contains nothing. - - # This construction is necessary to get python to - # return no results properly to callers expecting - # a generator function. - return iter(()) - return base.get_package_counts_by_pub(pubs=pubs) - - def get_part(self, name, must_exist=False): - """Returns the CatalogPart object for the named catalog part. - - 'must_exist' is an optional boolean value that indicates that - the catalog part must already exist in-memory or on-disk, if - not a value of None will be returned.""" - - # First, check if the part has already been cached, and if so, - # return it. - part = self.__parts.get(name, None) - if part is not None: - return part - elif not self.meta_root and must_exist: - return - - # If the caller said the part must_exist, then it must already - # be part of the catalog attributes to be valid. - aparts = self._attrs.parts - if must_exist and name not in aparts: - return - - # Next, since the part hasn't been cached, create an object - # for it and add it to catalog attributes. - part = CatalogPart(name, meta_root=self.meta_root, - ordered=not self.__batch_mode, sign=self.__sign) - if must_exist and self.meta_root and not part.exists: - # This is a double-check for the client case where - # there is a part that is known to the catalog but - # that the client has purposefully not retrieved. - # (Think locale specific data.) - return - - self.__parts[name] = part - - if name not in aparts: - # Add a new entry to the catalog attributes for this new - # part since it didn't exist previously. - aparts[name] = { - "last-modified": part.last_modified - } - return part - - def get_updates_needed(self, path): - """Returns a list of the catalog files needed to update - the existing catalog parts, based on the contents of the - catalog.attrs file in the directory indicated by 'path'. - A value of None will be returned if the catalog has - not been modified, while an empty list will be returned - if no catalog parts need to be updated, but the catalog - itself has changed.""" - - new_attrs = CatalogAttrs(meta_root=path) - if not new_attrs.exists: - # No updates needed (not even to attrs), so return None. - return None - - old_attrs = self._attrs - if old_attrs.created != new_attrs.created: - # It's very likely that the catalog has been recreated - # or this is a completely different catalog than was - # expected. In either case, an update isn't possible. - raise api_errors.BadCatalogUpdateIdentity(path) - - if new_attrs.last_modified == old_attrs.last_modified: - # No updates needed (not even to attrs), so return None. - return None - - # First, verify that all of the catalog parts the client has - # still exist. If they no longer exist, the catalog is no - # longer valid and cannot be updated. 
- parts = {} - incremental = True - for name in old_attrs.parts: - if name not in new_attrs.parts: - raise api_errors.BadCatalogUpdateIdentity(path) - - old_lm = old_attrs.parts[name]["last-modified"] - new_lm = new_attrs.parts[name]["last-modified"] - - if new_lm == old_lm: - # Part hasn't changed. - continue - elif new_lm < old_lm: - raise api_errors.ObsoleteCatalogUpdate(path) - - # The last component of the update name is the locale. - locale = name.split(".", 2)[2] - - # Now check to see if an update log is still offered for - # the last time this catalog part was updated. If it - # does not, then an incremental update cannot be safely - # performed since updates may be missing. - logdate = datetime_to_update_ts(old_lm) - logname = "update.{0}.{1}".format(logdate, locale) - - if logname not in new_attrs.updates: - incremental = False - - parts.setdefault(locale, set()) - parts[locale].add(name) - - # XXX in future, add current locale to this. For now, just - # ensure that all of the locales of parts that were changed - # and exist on-disk are included. - locales = set(("C",)) - locales.update(set(parts.keys())) - - # Now determine if there are any new parts for this locale that - # this version of the API knows how to use that the client - # doesn't already have. - for name in new_attrs.parts: - if name in parts or name in old_attrs.parts: - continue - - # The last component of the name is the locale. - locale = name.split(".", 2)[2] - if locale not in locales: - continue - - # Currently, only these parts are used by the client, - # so only they need to be retrieved. - if name == self.__BASE_PART or \ - name == self.__DEPS_PART or \ - name.startswith(self.__SUMM_PART_PFX): - incremental = False - - # If a new part has been added for the current - # locale, then incremental updates can't be - # performed since updates for this locale can - # only be applied to parts that already exist. - parts.setdefault(locale, set()) - parts[locale].add(name) - - if not parts: - # No updates needed to catalog parts on-disk, but - # catalog has changed. - return [] - elif not incremental: - # Since an incremental update cannot be performed, - # just return the updated parts for retrieval. - updates = set() - for locale in parts: - updates.update(parts[locale]) - return updates - - # Finally, determine the update logs needed based on the catalog - # parts that need updating on a per-locale basis. - updates = set() - for locale in parts: - # Determine the newest catalog part for a given locale, - # this will be used to determine which update logs are - # needed for an incremental update. - last_lm = None - for name in parts[locale]: - if name not in old_attrs.parts: - continue - - lm = old_attrs.parts[name]["last-modified"] - if not last_lm or lm > last_lm: - last_lm = lm - - for name, uattrs in six.iteritems(new_attrs.updates): - up_lm = uattrs["last-modified"] - - # The last component of the update name is the - # locale. - up_locale = name.split(".", 2)[2] - - if not up_locale == locale: - # This update log doesn't apply to the - # locale being evaluated for updates. - continue - - if up_lm <= last_lm: - # Older or same as newest catalog part - # for this locale; so skip. - continue - - # If this updatelog was changed after the - # newest catalog part for this locale, then - # it is needed to update one or more catalog - # parts for this locale. - updates.add(name) - - # Ensure updates are in chronological ascending order. 
- return sorted(updates) - - def names(self, pubs=EmptyI): - """Returns a set containing the names of all the packages in - the Catalog. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - # Catalog contains nothing. - return set() - return base.names(pubs=pubs) - - @property - def package_count(self): - """The number of unique packages in the catalog.""" - return self._attrs.package_count - - @property - def package_version_count(self): - """The number of unique package versions in the catalog.""" - return self._attrs.package_version_count - - @property - def parts(self): - """A dict containing the list of CatalogParts that the catalog - is composed of along with information about each part.""" - - return self._attrs.parts - - def pkg_names(self, pubs=EmptyI): - """A generator function that produces package tuples of the form - (pub, stem) as it iterates over the contents of the catalog. - - 'pubs' is an optional list that contains the prefixes of the - publishers to restrict the results to.""" - - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - # Catalog contains nothing. - - # This construction is necessary to get python to - # return no results properly to callers expecting - # a generator function. - return iter(()) - return base.pkg_names(pubs=pubs) - - def publishers(self): - """Returns a set containing the prefixes of all the publishers - in the Catalog.""" - - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - # Catalog contains nothing. - return set() - return set(p for p in base.publishers()) - - def remove_package(self, pfmri): - """Remove a package and its metadata.""" - - assert not self.read_only - - self.__lock_catalog() - try: - # The package has to be removed from every known part. - entries = {} - - # Use the same operation time and date for all - # operations so that the last modification times - # of all catalog parts and update logs will be - # synchronized. - op_time = datetime.datetime.utcnow() - - for name in self._attrs.parts: - part = self.get_part(name) - if part is None: - continue - - pkg_entry = part.get_entry(pfmri) - if pkg_entry is None: - if name == self.__BASE_PART: - # Entry should exist in at least - # the base part. - raise api_errors.UnknownCatalogEntry( - pfmri.get_fmri()) - # Skip; package's presence is optional - # in other parts. - continue - - part.remove(pfmri, op_time=op_time) - if self.log_updates: - entries[part.name] = pkg_entry - - self.__log_update(pfmri, CatalogUpdate.REMOVE, op_time, - entries=entries) - finally: - self.__unlock_catalog() - - def save(self, fmt='utf8'): - """Finalize current state and save to file if possible.""" - - self.__lock_catalog() - try: - self.__save(fmt) - finally: - self.__unlock_catalog() - - @property - def signatures(self): - """Returns a dict of the files the catalog is composed of along - with the last known signatures of each if they are available.""" - - attrs = self._attrs - sigs = { - attrs.name: attrs.signatures - } + { + fmri1: [pat1, pat2, ...], + fmri2: [pat1, pat2, ...], + ... + } - for items in (attrs.parts, attrs.updates): - for name in items: - entry = sigs[name] = {} - for k in items[name]: - try: - sig = k.split("signature-")[1] - entry[sig] = items[name][k] - except IndexError: - # Not a signature entry. 
- continue - return sigs - - def tuples(self, last=False, ordered=False, pubs=EmptyI): - """A generator function that produces FMRI tuples as it - iterates over the contents of the catalog. - - 'last' is a boolean value that indicates only the last FMRI - tuple for each package on a per-publisher basis should be - returned. As long as the catalog has been saved since the - last modifying operation, or finalize() has has been called, - this will also be the newest version of the package. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - # Catalog contains nothing. - - # This construction is necessary to get python to - # return no results properly to callers expecting - # a generator function. - return iter(()) - return base.tuples(last=last, ordered=ordered, pubs=pubs) - - def tuple_entries(self, info_needed=EmptyI, last=False, locales=None, - ordered=False, pubs=EmptyI): - """A generator function that produces tuples of the format - ((pub, stem, version), entry, actions) as it iterates over - the contents of the catalog (where 'metadata' is a dict - containing the requested information). - - 'metadata' always contains the following information at a - minimum: - - BASE - 'metadata' will be populated with Manifest - signature data, if available, using key-value - pairs of the form 'signature-': value. - - 'info_needed' is an optional list of one or more catalog - constants indicating the types of catalog data that will - be returned in 'metadata' in addition to the above: - - DEPENDENCY - 'metadata' will contain depend and set Actions - for package obsoletion, renaming, variants, - and facets stored in a list under the - key 'actions'. - - SUMMARY - 'metadata' will contain any remaining Actions - not listed above, such as pkg.summary, - pkg.description, etc. in a list under the key - 'actions'. - - 'last' is a boolean value that indicates only the last entry - for each package on a per-publisher basis should be returned. - As long as the catalog has been saved since the last modifying - operation, or finalize() has has been called, this will also be - the newest version of the package. - - 'locales' is an optional set of locale names for which Actions - should be returned. The default is set(('C',)) if not provided. - Note that unlike actions(), catalog entries will not lazy-load - action data if it is missing from the catalog. - - 'ordered' is an optional boolean value that indicates that - results should sorted by stem and then by publisher and - be in descending version order. If False, results will be - in a ascending version order on a per-publisher, per-stem - basis. 
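# Usage sketch (illustrative only; 'cat' is an assumed, loaded Catalog).
# tuples() mirrors fmris() but yields (pub, stem, version) tuples:
#
#     for pub, stem, ver in cat.tuples(last=True, ordered=True):
#         print(pub, stem, ver)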
- - 'pubs' is an optional list of publisher prefixes to restrict - the results to.""" - - return self.__entries(info_needed=info_needed, - locales=locales, last_version=last, ordered=ordered, - pubs=pubs, tuples=True) - - @property - def updates(self): - """A dict containing the list of known updates for the catalog - along with information about each update.""" - - return self._attrs.updates - - def update_entry(self, metadata, pfmri=None, pub=None, stem=None, - ver=None): - """Updates the metadata stored in a package's BASE catalog - record for the specified package. Cannot be used when read_only - or log_updates is enabled; should never be used with a Catalog - intended for incremental update usage. - - 'metadata' must be a dict of additional metadata to store with - the package's BASE record. - - 'pfmri' is the FMRI of the package to update the entry for. - - 'pub' is the publisher of the package. - - 'stem' is the stem of the package. - - 'ver' is the version string of the package. - - 'pfmri' or 'pub', 'stem', and 'ver' must be provided. - """ - - assert pfmri or (pub and stem and ver) - assert not self.log_updates and not self.read_only - - base = self.get_part(self.__BASE_PART, must_exist=True) - if base is None: - if not pfmri: - pfmri = fmri.PkgFmri(name=stem, publisher=pub, - version=ver) + set(['unmatched1', 'unmatchedN']) + + 'patterns' is the list of package patterns to match. + + Constraint used is always AUTO as per expected UI behavior when + determining successor versions. + + Note that patterns starting w/ pkg:/ require an exact match; + patterns containing '*' will using fnmatch rules; the default + trailing match rules are used for remaining patterns. + + Exactly duplicated patterns are ignored. + + Routine raises PackageMatchErrors if errors occur: it is + illegal to specify multiple different patterns that match the + same package name. Only patterns that contain wildcards are + allowed to match multiple packages. + """ + + # problems we check for + illegals = [] + unmatched = set() + multimatch = [] + multispec = [] + pat_data = [] + wildcard_patterns = set() + + # Each pattern in patterns can be a partial or full FMRI, so + # extract the individual components for use in filtering. + latest_pats = set() + seen = set() + npatterns = set() + for pat, error, pfmri, matcher in self.__parse_fmri_patterns(patterns): + if error: + illegals.append(error) + continue + + # Duplicate patterns are ignored. + sfmri = str(pfmri) + if sfmri in seen: + # A different form of the same pattern + # was specified already; ignore this + # one (e.g. pkg:/network/ping, + # /network/ping). + continue + + # Track used patterns. + seen.add(sfmri) + npatterns.add(pat) + if "*" in pfmri.pkg_name or "?" in pfmri.pkg_name: + wildcard_patterns.add(pat) + + if getattr(pfmri.version, "match_latest", None): + latest_pats.add(pat) + pat_data.append((pat, matcher, pfmri)) + + patterns = npatterns + del npatterns, seen + + if illegals: + raise api_errors.PackageMatchErrors(illegal=illegals) + + # Create a dictionary of patterns, with each value being a + # dictionary of pkg names & fmris that match that pattern. 
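As context for the matching loop that follows, here is a small self-contained sketch of the three pattern rules the docstring above describes: an exact match for pkg:/ patterns, fnmatch rules when a wildcard is present, and a trailing-component match otherwise. The helper name choose_matcher is hypothetical; in the real code each pattern's matcher comes from __parse_fmri_patterns().

    import fnmatch

    # Hypothetical helper illustrating the docstring's matching rules only.
    def choose_matcher(pattern):
        name = pattern.split("@", 1)[0]        # ignore any version suffix
        if name.startswith("pkg:/"):
            stem = name[len("pkg:/"):]
            return lambda candidate: candidate == stem
        if "*" in name or "?" in name:
            return lambda candidate: fnmatch.fnmatchcase(candidate, name)
        stem = name.lstrip("/")
        return lambda candidate: (
            candidate == stem or candidate.endswith("/" + stem)
        )

    m = choose_matcher("pkg:/network/ping")
    print(m("network/ping"), m("diagnostic/ping"))   # True False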
+ ret = dict(zip(patterns, [dict() for i in patterns])) + + for name in self.names(): + for pat, matcher, pfmri in pat_data: + pub = pfmri.publisher + version = pfmri.version + if not matcher(name, pfmri.pkg_name): + continue # name doesn't match + for ver, entries in self.entries_by_version(name): + if version and not ver.is_successor( + version, pkg.version.CONSTRAINT_AUTO + ): + continue # version doesn't match + for f, metadata in entries: + fpub = f.publisher + if pub and pub != fpub: + # specified pubs + # conflict + continue + ret[pat].setdefault(f.pkg_name, []).append(f) + + # Discard all but the newest version of each match. + if latest_pats: + # Rebuild ret based on latest version of every package. + latest = {} + nret = {} + for p in patterns: + if p not in latest_pats or not ret[p]: + nret[p] = ret[p] + continue + + nret[p] = {} + for pkg_name in ret[p]: + nret[p].setdefault(pkg_name, []) + for f in ret[p][pkg_name]: + nver = latest.get(f.pkg_name, None) + if nver > f.version: + # Not the newest. + continue + if nver == f.version: + # Allow for multiple + # FMRIs of the same + # latest version. + nret[p][pkg_name].append(f) + continue + + latest[f.pkg_name] = f.version + nret[p][pkg_name] = [f] + + # Assign new version of ret and discard latest list. + ret = nret + del latest + + # Determine match failures. + matchdict = {} + for p in patterns: + l = len(ret[p]) + if l == 0: # no matches at all + unmatched.add(p) + elif l > 1 and p not in wildcard_patterns: + # multiple matches + multimatch.append( + (p, [ret[p][n][0].get_pkg_stem() for n in ret[p]]) + ) + else: + # single match or wildcard + for k in ret[p].keys(): + # for each matching package name + matchdict.setdefault(k, []).append(p) + + if multimatch: + raise api_errors.PackageMatchErrors(multiple_matches=multimatch) + + # Group the matching patterns by package name and allow multiple + # fmri matches. + proposed_dict = {} + for d in ret.values(): + for k, l in six.iteritems(d): + proposed_dict.setdefault(k, []).extend(l) + + # construct references so that we can know which pattern + # generated which fmris... + references = dict( + [ + (f, p) + for p in ret.keys() + for flist in ret[p].values() + for f in flist + ] + ) + + return proposed_dict, references, unmatched + + def get_package_counts_by_pub(self, pubs=EmptyI): + """Returns a generator of tuples of the form (pub, + package_count, package_version_count). 'pub' is the publisher + prefix, 'package_count' is the number of unique packages for the + publisher, and 'package_version_count' is the number of unique + package versions for the publisher. + """ + + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + # Catalog contains nothing. + + # This construction is necessary to get python to + # return no results properly to callers expecting + # a generator function. + return iter(()) + return base.get_package_counts_by_pub(pubs=pubs) + + def get_part(self, name, must_exist=False): + """Returns the CatalogPart object for the named catalog part. + + 'must_exist' is an optional boolean value that indicates that + the catalog part must already exist in-memory or on-disk, if + not a value of None will be returned.""" + + # First, check if the part has already been cached, and if so, + # return it. + part = self.__parts.get(name, None) + if part is not None: + return part + elif not self.meta_root and must_exist: + return + + # If the caller said the part must_exist, then it must already + # be part of the catalog attributes to be valid. 
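The latest_pats pruning above keeps only the highest version seen for each stem while still allowing several FMRIs at that version. A rough stand-alone model, with plain (publisher, version-tuple) pairs standing in for real FMRI and pkg.version.Version objects, an assumption made purely for brevity:

    matches = {
        "web/server": [("openindiana.org", (1, 0)),
                       ("openindiana.org", (2, 0)),
                       ("hipster", (2, 0))],
    }
    pruned = {}
    for stem, fmris in matches.items():
        newest = max(ver for _, ver in fmris)
        # Keep every entry carrying the newest version; more than one
        # publisher may offer the same latest version.
        pruned[stem] = [f for f in fmris if f[1] == newest]
    print(pruned)
    # {'web/server': [('openindiana.org', (2, 0)), ('hipster', (2, 0))]}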
+ aparts = self._attrs.parts + if must_exist and name not in aparts: + return + + # Next, since the part hasn't been cached, create an object + # for it and add it to catalog attributes. + part = CatalogPart( + name, + meta_root=self.meta_root, + ordered=not self.__batch_mode, + sign=self.__sign, + ) + if must_exist and self.meta_root and not part.exists: + # This is a double-check for the client case where + # there is a part that is known to the catalog but + # that the client has purposefully not retrieved. + # (Think locale specific data.) + return + + self.__parts[name] = part + + if name not in aparts: + # Add a new entry to the catalog attributes for this new + # part since it didn't exist previously. + aparts[name] = {"last-modified": part.last_modified} + return part + + def get_updates_needed(self, path): + """Returns a list of the catalog files needed to update + the existing catalog parts, based on the contents of the + catalog.attrs file in the directory indicated by 'path'. + A value of None will be returned if the catalog has + not been modified, while an empty list will be returned + if no catalog parts need to be updated, but the catalog + itself has changed.""" + + new_attrs = CatalogAttrs(meta_root=path) + if not new_attrs.exists: + # No updates needed (not even to attrs), so return None. + return None + + old_attrs = self._attrs + if old_attrs.created != new_attrs.created: + # It's very likely that the catalog has been recreated + # or this is a completely different catalog than was + # expected. In either case, an update isn't possible. + raise api_errors.BadCatalogUpdateIdentity(path) + + if new_attrs.last_modified == old_attrs.last_modified: + # No updates needed (not even to attrs), so return None. + return None + + # First, verify that all of the catalog parts the client has + # still exist. If they no longer exist, the catalog is no + # longer valid and cannot be updated. + parts = {} + incremental = True + for name in old_attrs.parts: + if name not in new_attrs.parts: + raise api_errors.BadCatalogUpdateIdentity(path) + + old_lm = old_attrs.parts[name]["last-modified"] + new_lm = new_attrs.parts[name]["last-modified"] + + if new_lm == old_lm: + # Part hasn't changed. + continue + elif new_lm < old_lm: + raise api_errors.ObsoleteCatalogUpdate(path) + + # The last component of the update name is the locale. + locale = name.split(".", 2)[2] + + # Now check to see if an update log is still offered for + # the last time this catalog part was updated. If it + # does not, then an incremental update cannot be safely + # performed since updates may be missing. + logdate = datetime_to_update_ts(old_lm) + logname = "update.{0}.{1}".format(logdate, locale) + + if logname not in new_attrs.updates: + incremental = False + + parts.setdefault(locale, set()) + parts[locale].add(name) + + # XXX in future, add current locale to this. For now, just + # ensure that all of the locales of parts that were changed + # and exist on-disk are included. + locales = set(("C",)) + locales.update(set(parts.keys())) + + # Now determine if there are any new parts for this locale that + # this version of the API knows how to use that the client + # doesn't already have. + for name in new_attrs.parts: + if name in parts or name in old_attrs.parts: + continue + + # The last component of the name is the locale. + locale = name.split(".", 2)[2] + if locale not in locales: + continue + + # Currently, only these parts are used by the client, + # so only they need to be retrieved. 
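The update log name built above uses the "basic partial" timestamp format produced by datetime_to_update_ts() later in this file. A stand-alone sketch of the derivation, with an illustrative last-modified time and locale:

    import datetime

    # Reduce a part's last-modified time to date plus hour and join it
    # with the part's locale, as done for 'logname' above.
    def update_log_name(last_modified, locale):
        ts = last_modified.isoformat().replace("-", "")
        ts = ts.rsplit(":", 2)[0].replace(":", "")
        if not last_modified.tzname():
            ts += "Z"          # naive datetimes are treated as UTC
        return "update.{0}.{1}".format(ts, locale)

    print(update_log_name(datetime.datetime(2023, 5, 1, 14, 30, 15), "C"))
    # update.20230501T14Z.C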
+ if ( + name == self.__BASE_PART + or name == self.__DEPS_PART + or name.startswith(self.__SUMM_PART_PFX) + ): + incremental = False + + # If a new part has been added for the current + # locale, then incremental updates can't be + # performed since updates for this locale can + # only be applied to parts that already exist. + parts.setdefault(locale, set()) + parts[locale].add(name) + + if not parts: + # No updates needed to catalog parts on-disk, but + # catalog has changed. + return [] + elif not incremental: + # Since an incremental update cannot be performed, + # just return the updated parts for retrieval. + updates = set() + for locale in parts: + updates.update(parts[locale]) + return updates + + # Finally, determine the update logs needed based on the catalog + # parts that need updating on a per-locale basis. + updates = set() + for locale in parts: + # Determine the newest catalog part for a given locale, + # this will be used to determine which update logs are + # needed for an incremental update. + last_lm = None + for name in parts[locale]: + if name not in old_attrs.parts: + continue + + lm = old_attrs.parts[name]["last-modified"] + if not last_lm or lm > last_lm: + last_lm = lm + + for name, uattrs in six.iteritems(new_attrs.updates): + up_lm = uattrs["last-modified"] + + # The last component of the update name is the + # locale. + up_locale = name.split(".", 2)[2] + + if not up_locale == locale: + # This update log doesn't apply to the + # locale being evaluated for updates. + continue + + if up_lm <= last_lm: + # Older or same as newest catalog part + # for this locale; so skip. + continue + + # If this updatelog was changed after the + # newest catalog part for this locale, then + # it is needed to update one or more catalog + # parts for this locale. + updates.add(name) + + # Ensure updates are in chronological ascending order. + return sorted(updates) + + def names(self, pubs=EmptyI): + """Returns a set containing the names of all the packages in + the Catalog. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + # Catalog contains nothing. + return set() + return base.names(pubs=pubs) + + @property + def package_count(self): + """The number of unique packages in the catalog.""" + return self._attrs.package_count + + @property + def package_version_count(self): + """The number of unique package versions in the catalog.""" + return self._attrs.package_version_count + + @property + def parts(self): + """A dict containing the list of CatalogParts that the catalog + is composed of along with information about each part.""" + + return self._attrs.parts + + def pkg_names(self, pubs=EmptyI): + """A generator function that produces package tuples of the form + (pub, stem) as it iterates over the contents of the catalog. + + 'pubs' is an optional list that contains the prefixes of the + publishers to restrict the results to.""" + + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + # Catalog contains nothing. + + # This construction is necessary to get python to + # return no results properly to callers expecting + # a generator function. + return iter(()) + return base.pkg_names(pubs=pubs) + + def publishers(self): + """Returns a set containing the prefixes of all the publishers + in the Catalog.""" + + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + # Catalog contains nothing. 
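For the incremental path shown above, only update logs modified after the newest on-disk part for a locale need to be retrieved. A toy illustration of that comparison, using made-up timestamps:

    import datetime

    part_lms = [datetime.datetime(2023, 5, 1, 12, 0),
                datetime.datetime(2023, 5, 1, 14, 0)]
    update_logs = {
        "update.20230501T13Z.C": datetime.datetime(2023, 5, 1, 13, 0),
        "update.20230501T15Z.C": datetime.datetime(2023, 5, 1, 15, 0),
    }
    last_lm = max(part_lms)
    # Logs at or before the newest part are already reflected on disk.
    needed = sorted(n for n, lm in update_logs.items() if lm > last_lm)
    print(needed)   # ['update.20230501T15Z.C']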
+ return set() + return set(p for p in base.publishers()) + + def remove_package(self, pfmri): + """Remove a package and its metadata.""" + + assert not self.read_only + + self.__lock_catalog() + try: + # The package has to be removed from every known part. + entries = {} + + # Use the same operation time and date for all + # operations so that the last modification times + # of all catalog parts and update logs will be + # synchronized. + op_time = datetime.datetime.utcnow() + + for name in self._attrs.parts: + part = self.get_part(name) + if part is None: + continue + + pkg_entry = part.get_entry(pfmri) + if pkg_entry is None: + if name == self.__BASE_PART: + # Entry should exist in at least + # the base part. raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + # Skip; package's presence is optional + # in other parts. + continue - # get_entry returns the actual catalog entry, so updating it - # simply requires reassignment. - entry = base.get_entry(pfmri=pfmri, pub=pub, stem=stem, ver=ver) - if entry is None: - if not pfmri: - pfmri = fmri.PkgFmri(name=stem, publisher=pub, - version=ver) - raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) - if metadata is None: - if "metadata" in entry: - del entry["metadata"] - return - entry["metadata"] = metadata + part.remove(pfmri, op_time=op_time) + if self.log_updates: + entries[part.name] = pkg_entry - op_time = datetime.datetime.utcnow() - attrs = self._attrs - attrs.last_modified = op_time - attrs.parts[base.name] = { - "last-modified": op_time - } - base.last_modified = op_time - - def validate(self, require_signatures=False): - """Verifies whether the signatures for the contents of the - catalog match the current signature data. Raises the - exception named 'BadCatalogSignatures' on failure.""" - - self._attrs.validate(require_signatures=require_signatures) - - def get_sigs(mdata): - sigs = {} - for key in mdata: - if not key.startswith("signature-"): - continue - sig = key.split("signature-")[1] - sigs[sig] = mdata[key] - if not sigs: - # Allow validate() to perform its own fallback - # logic if signature data isn't available. - return None - return sigs - - for name, mdata in six.iteritems(self._attrs.parts): - part = self.get_part(name, must_exist=True) - if part is None: - # Part does not exist; no validation needed. - continue - part.validate(signatures=get_sigs(mdata), - require_signatures=require_signatures) - - for name, mdata in six.iteritems(self._attrs.updates): - ulog = self.__get_update(name, cache=False, - must_exist=True) - if ulog is None: - # Update does not exist; no validation needed. 
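The Catalog.signatures property reindented in this hunk gathers keys of the form 'signature-<algorithm>' from each part's metadata. Equivalent filtering in isolation, with sample data (the real code uses split() plus an IndexError guard rather than startswith, but the effect is the same):

    mdata = {"signature-sha-1": "ab12cd", "last-modified": "20230501T14Z"}
    sigs = {}
    for key, val in mdata.items():
        if key.startswith("signature-"):
            sigs[key.split("signature-")[1]] = val
    print(sigs)   # {'sha-1': 'ab12cd'}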
- continue - ulog.validate(signatures=get_sigs(mdata), - require_signatures=require_signatures) - - batch_mode = property(__get_batch_mode, __set_batch_mode) - last_modified = property(__get_last_modified, __set_last_modified, - doc="A UTC datetime object indicating the last time the catalog " - "was modified.") - meta_root = property(__get_meta_root, __set_meta_root) - sign = property(__get_sign, __set_sign) - version = property(__get_version, __set_version) + self.__log_update( + pfmri, CatalogUpdate.REMOVE, op_time, entries=entries + ) + finally: + self.__unlock_catalog() + + def save(self, fmt="utf8"): + """Finalize current state and save to file if possible.""" + + self.__lock_catalog() + try: + self.__save(fmt) + finally: + self.__unlock_catalog() + + @property + def signatures(self): + """Returns a dict of the files the catalog is composed of along + with the last known signatures of each if they are available.""" + + attrs = self._attrs + sigs = {attrs.name: attrs.signatures} + + for items in (attrs.parts, attrs.updates): + for name in items: + entry = sigs[name] = {} + for k in items[name]: + try: + sig = k.split("signature-")[1] + entry[sig] = items[name][k] + except IndexError: + # Not a signature entry. + continue + return sigs + + def tuples(self, last=False, ordered=False, pubs=EmptyI): + """A generator function that produces FMRI tuples as it + iterates over the contents of the catalog. + + 'last' is a boolean value that indicates only the last FMRI + tuple for each package on a per-publisher basis should be + returned. As long as the catalog has been saved since the + last modifying operation, or finalize() has has been called, + this will also be the newest version of the package. + + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + # Catalog contains nothing. + + # This construction is necessary to get python to + # return no results properly to callers expecting + # a generator function. + return iter(()) + return base.tuples(last=last, ordered=ordered, pubs=pubs) + + def tuple_entries( + self, + info_needed=EmptyI, + last=False, + locales=None, + ordered=False, + pubs=EmptyI, + ): + """A generator function that produces tuples of the format + ((pub, stem, version), entry, actions) as it iterates over + the contents of the catalog (where 'metadata' is a dict + containing the requested information). + + 'metadata' always contains the following information at a + minimum: + + BASE + 'metadata' will be populated with Manifest + signature data, if available, using key-value + pairs of the form 'signature-': value. + + 'info_needed' is an optional list of one or more catalog + constants indicating the types of catalog data that will + be returned in 'metadata' in addition to the above: + + DEPENDENCY + 'metadata' will contain depend and set Actions + for package obsoletion, renaming, variants, + and facets stored in a list under the + key 'actions'. + + SUMMARY + 'metadata' will contain any remaining Actions + not listed above, such as pkg.summary, + pkg.description, etc. in a list under the key + 'actions'. 
+ + 'last' is a boolean value that indicates only the last entry + for each package on a per-publisher basis should be returned. + As long as the catalog has been saved since the last modifying + operation, or finalize() has has been called, this will also be + the newest version of the package. + + 'locales' is an optional set of locale names for which Actions + should be returned. The default is set(('C',)) if not provided. + Note that unlike actions(), catalog entries will not lazy-load + action data if it is missing from the catalog. + + 'ordered' is an optional boolean value that indicates that + results should sorted by stem and then by publisher and + be in descending version order. If False, results will be + in a ascending version order on a per-publisher, per-stem + basis. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to.""" + + return self.__entries( + info_needed=info_needed, + locales=locales, + last_version=last, + ordered=ordered, + pubs=pubs, + tuples=True, + ) + + @property + def updates(self): + """A dict containing the list of known updates for the catalog + along with information about each update.""" + + return self._attrs.updates + + def update_entry(self, metadata, pfmri=None, pub=None, stem=None, ver=None): + """Updates the metadata stored in a package's BASE catalog + record for the specified package. Cannot be used when read_only + or log_updates is enabled; should never be used with a Catalog + intended for incremental update usage. + + 'metadata' must be a dict of additional metadata to store with + the package's BASE record. + + 'pfmri' is the FMRI of the package to update the entry for. + + 'pub' is the publisher of the package. + + 'stem' is the stem of the package. + + 'ver' is the version string of the package. + + 'pfmri' or 'pub', 'stem', and 'ver' must be provided. + """ + + assert pfmri or (pub and stem and ver) + assert not self.log_updates and not self.read_only + + base = self.get_part(self.__BASE_PART, must_exist=True) + if base is None: + if not pfmri: + pfmri = fmri.PkgFmri(name=stem, publisher=pub, version=ver) + raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + + # get_entry returns the actual catalog entry, so updating it + # simply requires reassignment. + entry = base.get_entry(pfmri=pfmri, pub=pub, stem=stem, ver=ver) + if entry is None: + if not pfmri: + pfmri = fmri.PkgFmri(name=stem, publisher=pub, version=ver) + raise api_errors.UnknownCatalogEntry(pfmri.get_fmri()) + if metadata is None: + if "metadata" in entry: + del entry["metadata"] + return + entry["metadata"] = metadata + + op_time = datetime.datetime.utcnow() + attrs = self._attrs + attrs.last_modified = op_time + attrs.parts[base.name] = {"last-modified": op_time} + base.last_modified = op_time + + def validate(self, require_signatures=False): + """Verifies whether the signatures for the contents of the + catalog match the current signature data. Raises the + exception named 'BadCatalogSignatures' on failure.""" + + self._attrs.validate(require_signatures=require_signatures) + + def get_sigs(mdata): + sigs = {} + for key in mdata: + if not key.startswith("signature-"): + continue + sig = key.split("signature-")[1] + sigs[sig] = mdata[key] + if not sigs: + # Allow validate() to perform its own fallback + # logic if signature data isn't available. + return None + return sigs + + for name, mdata in six.iteritems(self._attrs.parts): + part = self.get_part(name, must_exist=True) + if part is None: + # Part does not exist; no validation needed. 
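update_entry() above either replaces the entry's stored metadata wholesale or, when None is passed, deletes it. A self-contained model of just that handling, with an arbitrary example key:

    def apply_metadata(entry, metadata):
        # Mirrors update_entry(): None clears stored metadata, anything
        # else replaces it.
        if metadata is None:
            entry.pop("metadata", None)
            return entry
        entry["metadata"] = metadata
        return entry

    entry = {"version": "1.0"}
    print(apply_metadata(entry, {"example-note": "rebuilt"}))
    print(apply_metadata(entry, None))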
+ continue + part.validate( + signatures=get_sigs(mdata), + require_signatures=require_signatures, + ) + + for name, mdata in six.iteritems(self._attrs.updates): + ulog = self.__get_update(name, cache=False, must_exist=True) + if ulog is None: + # Update does not exist; no validation needed. + continue + ulog.validate( + signatures=get_sigs(mdata), + require_signatures=require_signatures, + ) + + batch_mode = property(__get_batch_mode, __set_batch_mode) + last_modified = property( + __get_last_modified, + __set_last_modified, + doc="A UTC datetime object indicating the last time the catalog " + "was modified.", + ) + meta_root = property(__get_meta_root, __set_meta_root) + sign = property(__get_sign, __set_sign) + version = property(__get_version, __set_version) # Methods used by external callers def verify(filename): - """Convert the catalog part named by filename into the correct - type of Catalog object and then call its validate method to ensure - that is contents are self-consistent.""" + """Convert the catalog part named by filename into the correct + type of Catalog object and then call its validate method to ensure + that is contents are self-consistent.""" - path, fn = os.path.split(filename) - catobj = None + path, fn = os.path.split(filename) + catobj = None - if fn.startswith("catalog"): - if fn.endswith("attrs"): - catobj = CatalogAttrs(meta_root=path) - else: - catobj = CatalogPart(fn, meta_root=path) - elif fn.startswith("update"): - catobj = CatalogUpdate(fn, meta_root=path) + if fn.startswith("catalog"): + if fn.endswith("attrs"): + catobj = CatalogAttrs(meta_root=path) else: - # Unrecognized. - raise api_errors.UnrecognizedCatalogPart(fn) + catobj = CatalogPart(fn, meta_root=path) + elif fn.startswith("update"): + catobj = CatalogUpdate(fn, meta_root=path) + else: + # Unrecognized. + raise api_errors.UnrecognizedCatalogPart(fn) - # With the else case above, this should never be None. - assert catobj + # With the else case above, this should never be None. + assert catobj + + catobj.validate(require_signatures=True) - catobj.validate(require_signatures=True) # Methods used by Catalog classes. def datetime_to_ts(dt): - """Take datetime object dt, and convert it to a ts in ISO-8601 - format. """ + """Take datetime object dt, and convert it to a ts in ISO-8601 + format.""" + + return dt.isoformat() - return dt.isoformat() def datetime_to_basic_ts(dt): - """Take datetime object dt, and convert it to a ts in ISO-8601 - basic format. """ + """Take datetime object dt, and convert it to a ts in ISO-8601 + basic format.""" + + val = dt.isoformat() + val = val.replace("-", "") + val = val.replace(":", "") - val = dt.isoformat() - val = val.replace("-", "") - val = val.replace(":", "") + if not dt.tzname(): + # Assume UTC. + val += "Z" + return val - if not dt.tzname(): - # Assume UTC. - val += "Z" - return val def datetime_to_update_ts(dt): - """Take datetime object dt, and convert it to a ts in ISO-8601 - basic partial format. """ + """Take datetime object dt, and convert it to a ts in ISO-8601 + basic partial format.""" - val = dt.isoformat() - val = val.replace("-", "") - # Drop the minutes and seconds portion. - val = val.rsplit(":", 2)[0] - val = val.replace(":", "") + val = dt.isoformat() + val = val.replace("-", "") + # Drop the minutes and seconds portion. + val = val.rsplit(":", 2)[0] + val = val.replace(":", "") + + if not dt.tzname(): + # Assume UTC. + val += "Z" + return val - if not dt.tzname(): - # Assume UTC. 
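The three timestamp helpers in this hunk differ only in how much punctuation and precision they keep. A quick side-by-side sketch for one naive (assumed UTC) datetime:

    import datetime

    dt = datetime.datetime(2023, 5, 1, 14, 30, 15, 123456)
    full = dt.isoformat()                                      # datetime_to_ts
    basic = full.replace("-", "").replace(":", "") + "Z"       # datetime_to_basic_ts
    partial = full.replace("-", "").rsplit(":", 2)[0] + "Z"    # datetime_to_update_ts
    print(full)     # 2023-05-01T14:30:15.123456
    print(basic)    # 20230501T143015.123456Z
    print(partial)  # 20230501T14Z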
- val += "Z" - return val def now_to_basic_ts(): - """Returns the current UTC time as timestamp in ISO-8601 basic - format.""" - return datetime_to_basic_ts(datetime.datetime.utcnow()) + """Returns the current UTC time as timestamp in ISO-8601 basic + format.""" + return datetime_to_basic_ts(datetime.datetime.utcnow()) + def now_to_update_ts(): - """Returns the current UTC time as timestamp in ISO-8601 basic - partial format.""" - return datetime_to_update_ts(datetime.datetime.utcnow()) + """Returns the current UTC time as timestamp in ISO-8601 basic + partial format.""" + return datetime_to_update_ts(datetime.datetime.utcnow()) + def ts_to_datetime(ts): - """Take timestamp ts in ISO-8601 format, and convert it to a - datetime object.""" - - year = int(ts[0:4]) - month = int(ts[5:7]) - day = int(ts[8:10]) - hour = int(ts[11:13]) - minutes = int(ts[14:16]) - sec = int(ts[17:19]) - # usec is not in the string if 0 - try: - usec = int(ts[20:26]) - except ValueError: - usec = 0 - return datetime.datetime(year, month, day, hour, minutes, sec, usec) + """Take timestamp ts in ISO-8601 format, and convert it to a + datetime object.""" + + year = int(ts[0:4]) + month = int(ts[5:7]) + day = int(ts[8:10]) + hour = int(ts[11:13]) + minutes = int(ts[14:16]) + sec = int(ts[17:19]) + # usec is not in the string if 0 + try: + usec = int(ts[20:26]) + except ValueError: + usec = 0 + return datetime.datetime(year, month, day, hour, minutes, sec, usec) + def basic_ts_to_datetime(ts): - """Take timestamp ts in ISO-8601 basic format, and convert it to a - datetime object.""" - - year = int(ts[0:4]) - month = int(ts[4:6]) - day = int(ts[6:8]) - hour = int(ts[9:11]) - minutes = int(ts[11:13]) - sec = int(ts[13:15]) - # usec is not in the string if 0 - try: - usec = int(ts[16:22]) - except ValueError: - usec = 0 - return datetime.datetime(year, month, day, hour, minutes, sec, usec) + """Take timestamp ts in ISO-8601 basic format, and convert it to a + datetime object.""" + + year = int(ts[0:4]) + month = int(ts[4:6]) + day = int(ts[6:8]) + hour = int(ts[9:11]) + minutes = int(ts[11:13]) + sec = int(ts[13:15]) + # usec is not in the string if 0 + try: + usec = int(ts[16:22]) + except ValueError: + usec = 0 + return datetime.datetime(year, month, day, hour, minutes, sec, usec) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/cfgfiles.py b/src/modules/cfgfiles.py index c3a2925df..5544923c8 100644 --- a/src/modules/cfgfiles.py +++ b/src/modules/cfgfiles.py @@ -40,13 +40,20 @@ import pkg.lockfile as lockfile -class CfgFile(object): - """ Solaris configuration file editor... make it easier to - modify Solaris line-oriented configuration files from actions """ - - def __init__(self, filename, separator, column_names, keys, - comment_match="#", continuation_lines=False): +class CfgFile(object): + """Solaris configuration file editor... 
make it easier to + modify Solaris line-oriented configuration files from actions""" + + def __init__( + self, + filename, + separator, + column_names, + keys, + comment_match="#", + continuation_lines=False, + ): self.filename = filename self.separator = separator self.continuation_lines = continuation_lines @@ -58,7 +65,7 @@ def __init__(self, filename, separator, column_names, keys, self.default_values = dict((e[2], e[1]) for e in l) self.comment_regexp = re.compile(comment_match) - self.max_lengths=dict((n, 8) for n in self.column_names) + self.max_lengths = dict((n, 8) for n in self.column_names) if isinstance(keys, str): self.keys = [keys] @@ -67,11 +74,12 @@ def __init__(self, filename, separator, column_names, keys, self.index = {} - assert(set(self.column_names) >= set(self.keys)) + assert set(self.column_names) >= set(self.keys) def __str__(self): return "CfgFile({0}):{1}:{2}:{3}".format( - self.filename, self.keys, self.column_names, self.index) + self.filename, self.keys, self.column_names, self.index + ) def getcolumnnames(self): return self.column_names @@ -83,19 +91,18 @@ def splitline(self, line): return line.split(self.separator) def getfilelines(self): - """ given self, return list of lines to be printed. - default impl preserves orignal + insertion order""" - lines = [[self.index[l][2],self.index[l][0]] for l in self.index] + """given self, return list of lines to be printed. + default impl preserves orignal + insertion order""" + lines = [[self.index[l][2], self.index[l][0]] for l in self.index] lines.sort() return [l[1] for l in lines] - def readfile(self): if os.path.exists(self.filename): - file = open(self.filename, 'rb') + file = open(self.filename, "rb") lineno = 1 for line in file: - linecnt = 1; + linecnt = 1 try: line = line.decode("utf-8") except UnicodeDecodeError: @@ -108,14 +115,16 @@ def readfile(self): line = line.rstrip("\n") if self.iscommentline(line): - self.index[lineno] = \ - (line, None, lineno) + self.index[lineno] = (line, None, lineno) else: cols = self.splitline(line) if len(cols) == len(self.column_names): dic = dict(zip(self.column_names, cols)) - self.index[tuple(dic[k] for k in self.keys)] = \ - (line, dic, lineno) + self.index[tuple(dic[k] for k in self.keys)] = ( + line, + dic, + lineno, + ) else: self.index[lineno] = (line, None, lineno) lineno += linecnt @@ -130,15 +139,17 @@ def getvalue(self, template): return {} def getdefaultvalues(self): - """ returns dictionary of default string values - ignores - other types """ - return dict((i, self.default_values[i]) - for i in self.default_values - if isinstance(self.default_values[i], str)) + """returns dictionary of default string values - ignores + other types""" + return dict( + (i, self.default_values[i]) + for i in self.default_values + if isinstance(self.default_values[i], str) + ) def updatevalue(self, template): - """ update existing record, using orig values if missing - in template""" + """update existing record, using orig values if missing + in template""" orig = self.index[tuple(template[k] for k in self.keys)].copy() for name in self.column_names: if name in template: @@ -146,14 +157,15 @@ def updatevalue(self, template): self.setvalue(orig) def setvalue(self, template): - """ set value of record in file, replacing any previous def. - for any missing info, use defaults. Will insert new value """ + """set value of record in file, replacing any previous def. + for any missing info, use defaults. 
Will insert new value""" # bring in any missing values as defaults if not None for field in self.column_names: if field not in template: if self.default_values[field] is None: raise RuntimeError( - "Required attribute {0} is missing".format(field)) + "Required attribute {0} is missing".format(field) + ) elif hasattr(self.default_values[field], "__call__"): template[field] = self.default_values[field]() else: @@ -169,8 +181,11 @@ def setvalue(self, template): else: lineno = 0 line = self.valuetostr(template) - self.index[tuple(template[k] for k in self.keys)] = \ - (line, template, lineno) + self.index[tuple(template[k] for k in self.keys)] = ( + line, + template, + lineno, + ) self.needswriting = True def removevalue(self, template): @@ -178,14 +193,14 @@ def removevalue(self, template): self.needswriting = True def valuetostr(self, template): - """ print out values in file format """ - return("{0}".format(self.separator.join( - [ - "{0}".format(template[key]) for key in self.column_names - ]))) + """print out values in file format""" + return "{0}".format( + self.separator.join( + ["{0}".format(template[key]) for key in self.column_names] + ) + ) def writefile(self): - if not self.needswriting: return @@ -205,51 +220,61 @@ def writefile(self): os.rename(name, self.filename) + class PasswordFile(CfgFile): """Manage the passwd and shadow together. Note that - insertion/deletion of +/- fields isn't supported""" + insertion/deletion of +/- fields isn't supported""" + def __init__(self, path_prefix, lock=False): - self.password_file = \ - CfgFile(os.path.join(path_prefix, "etc/passwd"), - ":", - {"username" : (1, None), - "password" : (2, "x"), - "uid" : (3, None), - "gid" : (4, None), - "gcos-field" : (5, "& User"), - "home-dir" : (6, "/"), - "login-shell": (7, "") - }, - "username", comment_match="[-+]") + self.password_file = CfgFile( + os.path.join(path_prefix, "etc/passwd"), + ":", + { + "username": (1, None), + "password": (2, "x"), + "uid": (3, None), + "gid": (4, None), + "gcos-field": (5, "& User"), + "home-dir": (6, "/"), + "login-shell": (7, ""), + }, + "username", + comment_match="[-+]", + ) days = datetime.timedelta(seconds=time.time()).days - self.shadow_file = \ - CfgFile(os.path.join(path_prefix, "etc/shadow"), - ":", - {"username" : (1, None), - "password" : (2, "*LK*"), - "lastchg" : (3, days), - "min" : (4, ""), - "max" : (5, ""), - "warn" : (6, ""), - "inactive" : (7, ""), - "expire" : (8, ""), - "flag" : (9, "") - }, - "username", comment_match="[-+]") + self.shadow_file = CfgFile( + os.path.join(path_prefix, "etc/shadow"), + ":", + { + "username": (1, None), + "password": (2, "*LK*"), + "lastchg": (3, days), + "min": (4, ""), + "max": (5, ""), + "warn": (6, ""), + "inactive": (7, ""), + "expire": (8, ""), + "flag": (9, ""), + }, + "username", + comment_match="[-+]", + ) self.path_prefix = path_prefix - self.lockfile = lockfile.LockFile(os.path.join(self.path_prefix, - "etc/.pwd.lock")) + self.lockfile = lockfile.LockFile( + os.path.join(self.path_prefix, "etc/.pwd.lock") + ) if lock: self.lock() self.readfile() self.password_file.default_values["uid"] = self.getnextuid() def __str__(self): - return "PasswordFile: [{0} {1}]".format(self.password_file, - self.shadow_file) + return "PasswordFile: [{0} {1}]".format( + self.password_file, self.shadow_file + ) def getvalue(self, template): - """ merge dbs... do passwd file first to get right passwd value""" + """merge dbs... 
do passwd file first to get right passwd value""" c = self.password_file.getvalue(template).copy() c.update(self.shadow_file.getvalue(template)) return c @@ -257,7 +282,7 @@ def getvalue(self, template): def updatevalue(self, template): copy = template.copy() if "password" in copy: - copy["password"]="" + copy["password"] = "" self.password_file.updatevalue(copy) self.shadow_file.updatevalue(template) @@ -265,7 +290,7 @@ def setvalue(self, template): # ignore attempts to set passwd for passwd file copy = template.copy() if "password" in copy: - copy["password"]="x" + copy["password"] = "x" self.password_file.setvalue(copy) self.shadow_file.setvalue(template) @@ -275,7 +300,7 @@ def removevalue(self, template): def getnextuid(self): """returns next free system (<=99) uid""" - uids=[] + uids = [] for t in six.itervalues(self.password_file.index): if t[1]: uids.append(t[1]["uid"]) @@ -298,7 +323,7 @@ def writefile(self): self.shadow_file.writefile() def getuser(self, username): - return self.getvalue({"username" : username}) + return self.getvalue({"username": username}) def getdefaultvalues(self): a = self.password_file.getdefaultvalues() @@ -311,25 +336,32 @@ def lock(self): def unlock(self): self.lockfile.unlock() + class GroupFile(CfgFile): - """ manage the group file""" + """manage the group file""" + def __init__(self, image): self.__image = image - CfgFile.__init__(self, os.path.join(image.get_root(), "etc/group"), - ":", - {"groupname" : (1, None), - "password" : (2, ""), - "gid" : (3, None), - "user-list" : (4, "") - }, - "groupname", comment_match="[+-]") + CfgFile.__init__( + self, + os.path.join(image.get_root(), "etc/group"), + ":", + { + "groupname": (1, None), + "password": (2, ""), + "gid": (3, None), + "user-list": (4, ""), + }, + "groupname", + comment_match="[+-]", + ) self.readfile() self.default_values["gid"] = self.getnextgid() def getnextgid(self): """returns next free system (<=99) gid""" - gids=[] + gids = [] for t in six.itervalues(self.index): if t[1]: gids.append(t[1]["gid"]) @@ -339,7 +371,7 @@ def getnextgid(self): raise RuntimeError("No free system gids") def adduser(self, groupname, username): - """"add named user to group; does not check if user exists""" + """ "add named user to group; does not check if user exists""" group = self.getvalue({"groupname": groupname}) # If the group isn't in the database, we'll add the user to the group, # but unless the group is being added in the same transaction, the group @@ -348,33 +380,36 @@ def adduser(self, groupname, username): group = { "groupname": groupname, "gid": self.__image._groupsbyname.get(groupname, ""), - "user-list": "" + "user-list": "", } - users = set(group["user-list"].replace(","," ").split()) + users = set(group["user-list"].replace(",", " ").split()) users.add(username) group["user-list"] = ",".join(users) self.setvalue(group) def subuser(self, groupname, username): - """ remove named user from group """ + """remove named user from group""" group = self.getvalue({"groupname": groupname}) if not group: raise RuntimeError("subuser: No such group {0}".format(groupname)) - users = set(group["user-list"].replace(","," ").split()) + users = set(group["user-list"].replace(",", " ").split()) if username not in users: - raise RuntimeError("User {0} not in group {1}".format( - username, groupname)) + raise RuntimeError( + "User {0} not in group {1}".format(username, groupname) + ) users.remove(username) group["user-list"] = ",".join(users) self.setvalue(group) def getgroups(self, username): - """ return list of 
additional groups user belongs to """ - return sorted([ + """return list of additional groups user belongs to""" + return sorted( + [ t[1]["groupname"] for t in self.index.values() if t[1] is not None and username in t[1]["user-list"].split(",") - ]) + ] + ) def setgroups(self, username, groups): current = self.getgroups(username) @@ -390,31 +425,32 @@ def removeuser(self, username): for g in self.getgroups(username): self.subuser(g, username) + class FtpusersFile(CfgFile): - """ If a username is present in this file, it denies that user + """If a username is present in this file, it denies that user the ability to use ftp""" def __init__(self, path_prefix): - - CfgFile.__init__(self, os.path.join(path_prefix, "etc/ftpd/ftpusers"), - " ", - {"username" : (1, None) - }, - "username") + CfgFile.__init__( + self, + os.path.join(path_prefix, "etc/ftpd/ftpusers"), + " ", + {"username": (1, None)}, + "username", + ) self.readfile() def getuser(self, username): - """ returns true if user is allowed to use FTP - ie is NOT in file""" - return not 'username' in self.getvalue({"username" : username}) - + """returns true if user is allowed to use FTP - ie is NOT in file""" + return not "username" in self.getvalue({"username": username}) def adduser(self, username): - """ add specified user to file, removing ability to use ftp""" - self.setvalue({"username" : username}) + """add specified user to file, removing ability to use ftp""" + self.setvalue({"username": username}) def subuser(self, username): - """ remove specified user from file """ - self.removevalue({"username" : username}) + """remove specified user from file""" + self.removevalue({"username": username}) def setuser(self, username, value): """Add or remove 'username' from the file to turn off or on the user's @@ -428,32 +464,38 @@ def setuser(self, username, value): elif value and not self.getuser(username): self.subuser(username) + class UserattrFile(CfgFile): - """ manage the userattr file """ + """manage the userattr file""" + def __init__(self, path_prefix): - CfgFile.__init__(self, os.path.join(path_prefix, "etc/user_attr"), - ":", - {"username" : (1, None), - "qualifier" : (2, ""), - "reserved1" : (3, ""), - "reserved2" : (4, ""), - "attributes" : (5, "") - }, - "username") + CfgFile.__init__( + self, + os.path.join(path_prefix, "etc/user_attr"), + ":", + { + "username": (1, None), + "qualifier": (2, ""), + "reserved1": (3, ""), + "reserved2": (4, ""), + "attributes": (5, ""), + }, + "username", + ) self.readfile() def iscommentline(self, line): return len(line) == 0 or self.comment_regexp.match(line) def splitline(self, line): - """ return tokenized line, with attribute column a dictionary - w/ lists for values""" - cols = re.split(r"(?<=[^\\]):", line) #match non-escaped : + """return tokenized line, with attribute column a dictionary + w/ lists for values""" + cols = re.split(r"(?<=[^\\]):", line) # match non-escaped : if len(cols) != len(self.column_names): return cols - attributes=re.split(r"(?<=[^\\]);", cols[4]) # match non escaped ; + attributes = re.split(r"(?<=[^\\]);", cols[4]) # match non escaped ; d = {} for attr in attributes: @@ -463,19 +505,22 @@ def splitline(self, line): return cols def valuetostr(self, template): - """ print out string; replace attribute dictionary with proper - string and use base class to convert entire record to a string """ - c = template.copy() # since we're mucking w/ this.... 
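UserattrFile.splitline() above relies on lookbehind regexes so that escaped delimiters stay part of the data. A standalone illustration of that split on a made-up user_attr line:

    import re

    # A ':' or ';' preceded by a backslash is literal data, not a separator.
    line = r"jdoe::::type=normal;roles=sysadm\;backup"
    cols = re.split(r"(?<=[^\\]):", line)
    attrs = re.split(r"(?<=[^\\]);", cols[-1])
    print(cols)    # ['jdoe', '', '', '', 'type=normal;roles=sysadm\\;backup']
    print(attrs)   # ['type=normal', 'roles=sysadm\\;backup']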
+ """print out string; replace attribute dictionary with proper + string and use base class to convert entire record to a string""" + c = template.copy() # since we're mucking w/ this.... attrdict = c["attributes"] - str = "{0}".format(";".join( - [ - "{0}={1}".format(key, ",".join(attrdict[key])) - for key in attrdict - ])) + str = "{0}".format( + ";".join( + [ + "{0}={1}".format(key, ",".join(attrdict[key])) + for key in attrdict + ] + ) + ) c["attributes"] = str return CfgFile.valuetostr(self, c) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/choose.py b/src/modules/choose.py index 08f54b333..66bdd5044 100644 --- a/src/modules/choose.py +++ b/src/modules/choose.py @@ -8,22 +8,24 @@ import fnmatch import re + def choose(names, pat, case_sensitive): - """Return the subset of names that match pat. case_sensitive determines - whether the regexp is compiled to be case sensitive or not. - """ - # Derived from fnmatch.filter - result = [] - flag = 0 - # Setting the flag to re.I makes the regexp match using case - # insensitive rules. - if not case_sensitive: - flag = re.I - match = re.compile(fnmatch.translate(pat), flag).match - for name in names: - if match(name): - result.append(name) - return result + """Return the subset of names that match pat. case_sensitive determines + whether the regexp is compiled to be case sensitive or not. + """ + # Derived from fnmatch.filter + result = [] + flag = 0 + # Setting the flag to re.I makes the regexp match using case + # insensitive rules. + if not case_sensitive: + flag = re.I + match = re.compile(fnmatch.translate(pat), flag).match + for name in names: + if match(name): + result.append(name) + return result + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/__init__.py b/src/modules/client/__init__.py index 4fad0bbac..2b83d1b18 100644 --- a/src/modules/client/__init__.py +++ b/src/modules/client/__init__.py @@ -32,227 +32,245 @@ __all__ = ["global_settings"] + class _LogFilter(logging.Filter): - def __init__(self, max_level=logging.CRITICAL): - logging.Filter.__init__(self) - self.max_level = max_level + def __init__(self, max_level=logging.CRITICAL): + logging.Filter.__init__(self) + self.max_level = max_level - def filter(self, record): - return record.levelno <= self.max_level + def filter(self, record): + return record.levelno <= self.max_level class _StreamHandler(logging.StreamHandler): - """Simple subclass to ignore exceptions raised during logging output.""" + """Simple subclass to ignore exceptions raised during logging output.""" - def handleError(self, record): - # Ignore exceptions raised during output to stdout/stderr. - return + def handleError(self, record): + # Ignore exceptions raised during output to stdout/stderr. + return class GlobalSettings(object): - """ This class defines settings which are global - to the client instance """ - - def __init__(self): - object.__init__(self) - self.__info_log_handler = None - self.__error_log_handler = None - self.__verbose = False - - # - # These properties allow the client to describe how it - # has been asked to behave with respect to output. This - # allows subprocess invocations (e.g. for linked images) to - # discover from the global settings how they are expected - # to behave. 
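choose() in src/modules/choose.py, reindented above, is a thin case-optional wrapper around fnmatch. A condensed restatement with a sample call, included only to make its behaviour concrete:

    import fnmatch
    import re

    def choose(names, pat, case_sensitive):
        flag = 0 if case_sensitive else re.I
        match = re.compile(fnmatch.translate(pat), flag).match
        return [name for name in names if match(name)]

    print(choose(["web/server", "web/BROWSER", "library/zlib"], "web/*", False))
    # ['web/server', 'web/BROWSER']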
- # - self.client_output_verbose = 0 - self.client_output_quiet = False - self.client_output_parsable_version = None - self.client_no_network_cache = False - - # runid, used by the pkg.1 client and the linked image - # subsystem when when generating temporary files. - self.client_runid = os.getpid() - - # file descriptor used by ProgressTracker classes when running - # "pkg remote" to indicate progress back to the parent/client - # process. - self.client_output_progfd = None - - # concurrency value used for linked image recursion - self.client_concurrency_set = False - self.client_concurrency_default = 1 - self.client_concurrency = self.client_concurrency_default - try: - self.client_concurrency = int(os.environ.get( - "PKG_CONCURRENCY", - self.client_concurrency_default)) - if "PKG_CONCURRENCY" in os.environ: - self.client_concurrency_set = True - # remove PKG_CONCURRENCY from the environment so child - # processes don't inherit it. - os.environ.pop("PKG_CONCURRENCY", None) - except ValueError: - pass - - self.client_name = None - self.client_args = sys.argv[:] - # Default maximum number of redirects received before - # aborting a connection. - self.pkg_client_max_redirect_default = 5 - # Default number of retries per-host - self.pkg_client_max_timeout_default = 4 - # Default number of seconds to give up if not connected - self.pkg_client_connect_timeout_default = 60 - # Default number of seconds beneath low-speed limit before - # giving up. - self.pkg_client_lowspeed_timeout_default = 30 - # Minimum bytes/sec before client thinks about giving up - # on connection. - self.pkg_client_lowspeed_limit = 1024 - # Maximum number of transient errors before we abort an - # endpoint. - self.pkg_client_max_consecutive_error_default = 4 - - # The location within the image of the cache for pkg.sysrepo(8) - self.sysrepo_pub_cache_path = \ - "var/cache/pkg/sysrepo_pub_cache.dat" - - try: - # Maximum number of timeouts before client gives up. - self.PKG_CLIENT_MAX_TIMEOUT = int(os.environ.get( - "PKG_CLIENT_MAX_TIMEOUT", - self.pkg_client_max_timeout_default)) - except ValueError: - self.PKG_CLIENT_MAX_TIMEOUT = \ - self.pkg_client_max_timeout_default - try: - # Number of seconds trying to connect before client - # aborts. - self.PKG_CLIENT_CONNECT_TIMEOUT = int(os.environ.get( - "PKG_CLIENT_CONNECT_TIMEOUT", - self.pkg_client_connect_timeout_default)) - except ValueError: - self.PKG_CLIENT_CONNECT_TIMEOUT = \ - self.pkg_client_connect_timeout_default - try: - # Number of seconds below lowspeed limit before - # transaction is aborted. - self.PKG_CLIENT_LOWSPEED_TIMEOUT = int(os.environ.get( - "PKG_CLIENT_LOWSPEED_TIMEOUT", - self.pkg_client_lowspeed_timeout_default)) - except ValueError: - self.PKG_CLIENT_LOWSPEED_TIMEOUT = \ - self.pkg_client_lowspeed_timeout_default - try: - # Number of transient errors before transaction - # is aborted. - self.PKG_CLIENT_MAX_CONSECUTIVE_ERROR = int( - os.environ.get("PKG_CLIENT_MAX_CONSECUTIVE_ERROR", - self.pkg_client_max_consecutive_error_default)) - except ValueError: - self.PKG_CLIENT_MAX_CONSECUTIVE_ERROR = \ - self.pkg_client_max_consecutive_error_default - try: - # Number of redirects before a connection is - # aborted. 
- self.PKG_CLIENT_MAX_REDIRECT = int( - os.environ.get("PKG_CLIENT_MAX_REDIRECT", - self.pkg_client_max_redirect_default)) - except ValueError: - self.PKG_CLIENT_MAX_REDIRECT = \ - self.pkg_client_max_redirect_default - self.reset_logging() - - def __get_error_log_handler(self): - return self.__error_log_handler - - def __get_info_log_handler(self): - return self.__info_log_handler - - def __get_verbose(self): - return self.__verbose - - def __set_error_log_handler(self, val): - logger = logging.getLogger("pkg") - if self.__error_log_handler: - logger.removeHandler(self.__error_log_handler) - self.__error_log_handler = val - if val: - logger.addHandler(val) - - def __set_info_log_handler(self, val): - logger = logging.getLogger("pkg") - if self.__info_log_handler: - logger.removeHandler(self.__info_log_handler) - self.__info_log_handler = val - if val: - logger.addHandler(val) - - def __set_verbose(self, val): - if self.__info_log_handler: - if val: - level = logging.DEBUG - else: - level = logging.INFO - self.__info_log_handler.setLevel(level) - self.__verbose = val - - @property - def logger(self): - # Method could be a function; pylint: disable=R0201 - return logging.getLogger("pkg") - - def reset_logging(self): - """Resets client logging to its default state. This will cause - all logging.INFO entries to go to sys.stdout, and all entries of - logging.WARNING or higher to go to sys.stderr.""" - - logger = logging.getLogger("pkg") - logger.setLevel(logging.DEBUG) - - # Don't pass messages that are rejected to the root logger. - logger.propagate = 0 - - # By default, log all informational messages, but not warnings - # and above to stdout. - info_h = _StreamHandler(sys.stdout) - - # Minimum logging level for informational messages. - if self.verbose: - info_h.setLevel(logging.DEBUG) - else: - info_h.setLevel(logging.INFO) - - log_fmt = logging.Formatter() - - # Enforce maximum logging level for informational messages. - info_f = _LogFilter(logging.INFO) - info_h.addFilter(info_f) - info_h.setFormatter(log_fmt) - logger.addHandler(info_h) - - # By default, log all warnings and above to stderr. - error_h = _StreamHandler(sys.stderr) - error_h.setFormatter(log_fmt) - error_h.setLevel(logging.WARNING) - logger.addHandler(error_h) - - # Stash the handles so they can be removed later. - self.info_log_handler = info_h - self.error_log_handler = error_h - - error_log_handler = property(__get_error_log_handler, - __set_error_log_handler) - - info_log_handler = property(__get_info_log_handler, - __set_info_log_handler) - - verbose = property(__get_verbose, __set_verbose) + """This class defines settings which are global + to the client instance""" + + def __init__(self): + object.__init__(self) + self.__info_log_handler = None + self.__error_log_handler = None + self.__verbose = False + + # + # These properties allow the client to describe how it + # has been asked to behave with respect to output. This + # allows subprocess invocations (e.g. for linked images) to + # discover from the global settings how they are expected + # to behave. + # + self.client_output_verbose = 0 + self.client_output_quiet = False + self.client_output_parsable_version = None + self.client_no_network_cache = False + + # runid, used by the pkg.1 client and the linked image + # subsystem when when generating temporary files. + self.client_runid = os.getpid() + + # file descriptor used by ProgressTracker classes when running + # "pkg remote" to indicate progress back to the parent/client + # process. 
+ self.client_output_progfd = None + + # concurrency value used for linked image recursion + self.client_concurrency_set = False + self.client_concurrency_default = 1 + self.client_concurrency = self.client_concurrency_default + try: + self.client_concurrency = int( + os.environ.get( + "PKG_CONCURRENCY", self.client_concurrency_default + ) + ) + if "PKG_CONCURRENCY" in os.environ: + self.client_concurrency_set = True + # remove PKG_CONCURRENCY from the environment so child + # processes don't inherit it. + os.environ.pop("PKG_CONCURRENCY", None) + except ValueError: + pass + + self.client_name = None + self.client_args = sys.argv[:] + # Default maximum number of redirects received before + # aborting a connection. + self.pkg_client_max_redirect_default = 5 + # Default number of retries per-host + self.pkg_client_max_timeout_default = 4 + # Default number of seconds to give up if not connected + self.pkg_client_connect_timeout_default = 60 + # Default number of seconds beneath low-speed limit before + # giving up. + self.pkg_client_lowspeed_timeout_default = 30 + # Minimum bytes/sec before client thinks about giving up + # on connection. + self.pkg_client_lowspeed_limit = 1024 + # Maximum number of transient errors before we abort an + # endpoint. + self.pkg_client_max_consecutive_error_default = 4 + + # The location within the image of the cache for pkg.sysrepo(8) + self.sysrepo_pub_cache_path = "var/cache/pkg/sysrepo_pub_cache.dat" + + try: + # Maximum number of timeouts before client gives up. + self.PKG_CLIENT_MAX_TIMEOUT = int( + os.environ.get( + "PKG_CLIENT_MAX_TIMEOUT", + self.pkg_client_max_timeout_default, + ) + ) + except ValueError: + self.PKG_CLIENT_MAX_TIMEOUT = self.pkg_client_max_timeout_default + try: + # Number of seconds trying to connect before client + # aborts. + self.PKG_CLIENT_CONNECT_TIMEOUT = int( + os.environ.get( + "PKG_CLIENT_CONNECT_TIMEOUT", + self.pkg_client_connect_timeout_default, + ) + ) + except ValueError: + self.PKG_CLIENT_CONNECT_TIMEOUT = ( + self.pkg_client_connect_timeout_default + ) + try: + # Number of seconds below lowspeed limit before + # transaction is aborted. + self.PKG_CLIENT_LOWSPEED_TIMEOUT = int( + os.environ.get( + "PKG_CLIENT_LOWSPEED_TIMEOUT", + self.pkg_client_lowspeed_timeout_default, + ) + ) + except ValueError: + self.PKG_CLIENT_LOWSPEED_TIMEOUT = ( + self.pkg_client_lowspeed_timeout_default + ) + try: + # Number of transient errors before transaction + # is aborted. + self.PKG_CLIENT_MAX_CONSECUTIVE_ERROR = int( + os.environ.get( + "PKG_CLIENT_MAX_CONSECUTIVE_ERROR", + self.pkg_client_max_consecutive_error_default, + ) + ) + except ValueError: + self.PKG_CLIENT_MAX_CONSECUTIVE_ERROR = ( + self.pkg_client_max_consecutive_error_default + ) + try: + # Number of redirects before a connection is + # aborted. 
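Each of the PKG_CLIENT_* settings above follows the same defensive pattern: read the environment override and fall back to the built-in default if it is missing or not an integer. A minimal sketch of that pattern in isolation, using the max-timeout setting:

    import os

    DEFAULT_MAX_TIMEOUT = 4            # pkg_client_max_timeout_default
    try:
        max_timeout = int(os.environ.get("PKG_CLIENT_MAX_TIMEOUT",
                                         DEFAULT_MAX_TIMEOUT))
    except ValueError:
        max_timeout = DEFAULT_MAX_TIMEOUT
    print(max_timeout)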
+ self.PKG_CLIENT_MAX_REDIRECT = int( + os.environ.get( + "PKG_CLIENT_MAX_REDIRECT", + self.pkg_client_max_redirect_default, + ) + ) + except ValueError: + self.PKG_CLIENT_MAX_REDIRECT = self.pkg_client_max_redirect_default + self.reset_logging() + + def __get_error_log_handler(self): + return self.__error_log_handler + + def __get_info_log_handler(self): + return self.__info_log_handler + + def __get_verbose(self): + return self.__verbose + + def __set_error_log_handler(self, val): + logger = logging.getLogger("pkg") + if self.__error_log_handler: + logger.removeHandler(self.__error_log_handler) + self.__error_log_handler = val + if val: + logger.addHandler(val) + + def __set_info_log_handler(self, val): + logger = logging.getLogger("pkg") + if self.__info_log_handler: + logger.removeHandler(self.__info_log_handler) + self.__info_log_handler = val + if val: + logger.addHandler(val) + + def __set_verbose(self, val): + if self.__info_log_handler: + if val: + level = logging.DEBUG + else: + level = logging.INFO + self.__info_log_handler.setLevel(level) + self.__verbose = val + + @property + def logger(self): + # Method could be a function; pylint: disable=R0201 + return logging.getLogger("pkg") + + def reset_logging(self): + """Resets client logging to its default state. This will cause + all logging.INFO entries to go to sys.stdout, and all entries of + logging.WARNING or higher to go to sys.stderr.""" + + logger = logging.getLogger("pkg") + logger.setLevel(logging.DEBUG) + + # Don't pass messages that are rejected to the root logger. + logger.propagate = 0 + + # By default, log all informational messages, but not warnings + # and above to stdout. + info_h = _StreamHandler(sys.stdout) + + # Minimum logging level for informational messages. + if self.verbose: + info_h.setLevel(logging.DEBUG) + else: + info_h.setLevel(logging.INFO) + + log_fmt = logging.Formatter() + + # Enforce maximum logging level for informational messages. + info_f = _LogFilter(logging.INFO) + info_h.addFilter(info_f) + info_h.setFormatter(log_fmt) + logger.addHandler(info_h) + + # By default, log all warnings and above to stderr. + error_h = _StreamHandler(sys.stderr) + error_h.setFormatter(log_fmt) + error_h.setLevel(logging.WARNING) + logger.addHandler(error_h) + + # Stash the handles so they can be removed later. + self.info_log_handler = info_h + self.error_log_handler = error_h + + error_log_handler = property( + __get_error_log_handler, __set_error_log_handler + ) + + info_log_handler = property(__get_info_log_handler, __set_info_log_handler) + + verbose = property(__get_verbose, __set_verbose) global_settings = GlobalSettings() # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/actuator.py b/src/modules/client/actuator.py index 23ae6450e..e0a5b4e2a 100644 --- a/src/modules/client/actuator.py +++ b/src/modules/client/actuator.py @@ -36,394 +36,432 @@ class Actuator(object): - """Actuators are action attributes that cause side effects - on live images when those actions are updated, installed - or removed. Since no side effects are caused when the - affected image isn't the current root image, the OS may - need to cause the equivalent effect during boot. - This is Solaris specific for now. """ - - # Each set of attributes listed below determines what attributes - # will be scanned for a given type of operation. These sets must - # match the logic found in exec_pre_actuators() and - # exec_post_actuators() below or planning output may be incorrect. 
- __install_actuator_attrs = set([ - "release-note", # conditionally include this file - # in release notes - "refresh_fmri", # refresh this service on any change - "restart_fmri", # restart this service on any change - ]) - - __update_actuator_attrs = set([ - "reboot-needed", # have to reboot to update this file - "release-note", # conditionally include this file - # in release notes - "refresh_fmri", # refresh this service on any change - "restart_fmri", # restart this service on any change - "suspend_fmri", # suspend this service during update - ]) - - __removal_actuator_attrs = set([ - "reboot-needed", # have to reboot to update this file - "refresh_fmri", # refresh this service on any change - "restart_fmri", # restart this service on any change - "disable_fmri" # disable this service prior to removal - ]) - - __state__desc = { - "install": { - "disable_fmri": set(), - "reboot-needed": set(), - "refresh_fmri": set(), - "release-note": [(pkg.actions.generic.NSG, pkg.fmri.PkgFmri)], - "restart_fmri": set(), - "suspend_fmri": set(), - }, - "removal": { - "disable_fmri": set(), - "reboot-needed": set(), - "refresh_fmri": set(), - "release-note": [(pkg.actions.generic.NSG, pkg.fmri.PkgFmri)], - "restart_fmri": set(), - "suspend_fmri": set(), - }, - "update": { - "disable_fmri": set(), - "reboot-needed": set(), - "refresh_fmri": set(), - "release-note": [(pkg.actions.generic.NSG, pkg.fmri.PkgFmri)], - "restart_fmri": set(), - "suspend_fmri": set(), - }, - } - - def __init__(self): - self.install = {} - self.removal = {} - self.update = {} - self.suspend_fmris = None - self.tmp_suspend_fmris = None - self.do_nothing = True - #self.cmd_path = "" - self.sync_timeout = 0 - self.act_timed_out = False - self.zone = None - - @staticmethod - def getstate(obj, je_state=None): - """Returns the serialized state of this object in a format - that that can be easily stored using JSON, pickle, etc.""" - return pkg.misc.json_encode(Actuator.__name__, obj.__dict__, - Actuator.__state__desc, je_state=je_state) - - @staticmethod - def setstate(obj, state, jd_state=None): - """Update the state of this object using previously serialized - state obtained via getstate().""" - - # get the name of the object we're dealing with - name = type(obj).__name__ - - # decode serialized state into python objects - state = pkg.misc.json_decode(name, state, - Actuator.__state__desc, jd_state=jd_state) - - # bulk update - obj.__dict__.update(state) - - @staticmethod - def fromstate(state, jd_state=None): - """Allocate a new object using previously serialized state - obtained via getstate().""" - rv = Actuator() - Actuator.setstate(rv, state, jd_state) - return rv - - def set_timeout(self, timeout): - """ Set actuator timeout. - 'timeout' Actuator timeout in seconds. The following - special values are allowed: - 0: don't use synchronous actuators - -1: no timeout, wait until finished - """ - self.sync_timeout = timeout - - def set_zone(self, zname): - """Specify if actuators are supposed to be run within a zone. - If 'zname' is None, actuators are run in the global zone, - otherwise actuators are run in the zone 'zname'. The caller has - to make sure the zone exists and is running. If there are any - issues with calling an actuator in the zone, it will be - ignored.""" - - self.zone = zname - - @property - def timed_out(self): - return self.act_timed_out - - # Defining "boolness" of a class, Python 2 uses the special method - # called __nonzero__() while Python 3 uses __bool__(). 
For Python - # 2 and 3 compatibility, define __bool__() only, and let - # __nonzero__ = __bool__ - def __bool__(self): - return bool(self.install) or bool(self.removal) or \ - bool(self.update) - - __nonzero__ = __bool__ - - # scan_* functions take ActionPlan arguments (see imageplan.py) - def scan_install(self, ap): - self.__scan(self.install, ap.dst, ap.p.destination_fmri, - self.__install_actuator_attrs) - - def scan_removal(self, ap): - self.__scan(self.removal, ap.src, ap.p.origin_fmri, - self.__removal_actuator_attrs) - - def scan_update(self, ap): - if ap.src: - self.__scan(self.update, ap.src, ap.p.destination_fmri, - self.__update_actuator_attrs) - self.__scan(self.update, ap.dst, ap.p.destination_fmri, - self.__update_actuator_attrs) - - def __scan(self, dictionary, act, fmri, actuator_attrs): - attrs = act.attrs - for a in set(attrs.keys()) & actuator_attrs: - if a != "release-note": - values = attrs[a] - if not isinstance(values, list): - values = [values] - dictionary.setdefault(a, set()).update(values) - else: - if act.name == "file": # ignore for non-files - dictionary.setdefault(a, list()).append( - (act, fmri)) - - def get_list(self): - """Returns a list of actuator value pairs, suitable for printing""" - def check_val(dfmri): - # For actuators which are a single, global function that - # needs to get executed, simply print true. - if hasattr(dfmri, "__call__") or isinstance(dfmri, list): - return [ "true" ] - else: - return dfmri - - merge = {} - for d in [self.removal, self.update, self.install]: - for a in d.keys(): - for smf in check_val(d[a]): - merge.setdefault(a, set()).add(smf) - - if self.reboot_needed(): - merge["reboot-needed"] = set(["true"]) - else: - merge["reboot-needed"] = set(["false"]) - return [(fmri, smf) - for fmri in merge - for smf in merge[fmri] - ] - - def get_release_note_info(self): - """Returns a list of tuples of possible release notes""" - return self.update.get("release-note", []) + \ - self.install.get("release-note", []) - - def get_services_list(self): - """Returns a list of services that would be restarted""" - return [(fmri, smf) for fmri, smf in self.get_list() - if smf not in ["true", "false"]] - - def __str__(self): - return "\n".join(" {0:>16}: {1:}".format(fmri, smf) - for fmri, smf in self.get_list()) - - def reboot_advised(self): - """Returns True if action install execution may require a - reboot.""" - - return bool("true" in self.install.get("reboot-needed", [])) - - def reboot_needed(self): - """Returns True if action execution requires a new boot - environment.""" - - return bool("true" in self.update.get("reboot-needed", [])) or \ - bool("true" in self.removal.get("reboot-needed", [])) - - def __invoke(self, func, *args, **kwargs): - """Execute SMF command. Remember if command timed out.""" - - if self.zone: - kwargs["zone"] = self.zone - - try: - func(*args, **kwargs) - except smf.NonzeroExitException as nze: - if nze.return_code == smf.EXIT_TIMEOUT: - self.act_timed_out = True - elif " ".join(nze.output).startswith("zlogin:"): - # Ignore zlogin errors; the worst which - # can happen is that an actuator is not run - # (disable is always run with -t). - # Since we only test once if the zone is - # runnning, this could happen if someone shuts - # down the zone while we are in the process of - # executing. 
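get_list() above folds the removal, update and install dictionaries into printable (actuator, value) pairs, collapsing callables and lists to "true" and appending a synthetic reboot-needed entry. A compressed, self-contained sketch of that merge; the service FMRIs are made-up examples.

def merge_actuators(removal, update, install, reboot_needed):
    # Collect every value seen for each actuator across the three operations.
    merge = {}
    for d in (removal, update, install):
        for name, val in d.items():
            vals = ["true"] if callable(val) or isinstance(val, list) else val
            for v in vals:
                merge.setdefault(name, set()).add(v)
    # reboot-needed is always reported, even when false.
    merge["reboot-needed"] = {"true" if reboot_needed else "false"}
    return [(name, v) for name in merge for v in merge[name]]


print(
    merge_actuators(
        removal={"disable_fmri": {"svc:/old/service:default"}},
        update={"restart_fmri": {"svc:/system/manifest-import:default"}},
        install={},
        reboot_needed=False,
    )
)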
- pass - else: - raise - - def exec_prep(self, image): - if not image.is_liveroot(): -# -# XXX don't create the marker file as illumos doesn't support self-assembly -# milestone -# -# # we're doing off-line pkg ops; we need -# # to support self-assembly milestone -# # so create the necessary marker file -# -# if image.type != IMG_USER: -# path = os.path.join(image.root, -# ".SELF-ASSEMBLY-REQUIRED") -# # create only if it doesn't exist -# -# if not os.path.exists(path): -# os.close(os.open(path, -# os.O_EXCL | -# os.O_CREAT | -# os.O_WRONLY)) - if not DebugValues.get_value("smf_cmds_dir") and \ - not self.zone: - return - - self.do_nothing = False - - def exec_pre_actuators(self, image): - """do pre execution actuator processing...""" - - if self.do_nothing: - return - - suspend_fmris = self.update.get("suspend_fmri", set()) - tmp_suspend_fmris = set() - - disable_fmris = self.removal.get("disable_fmri", set()) - - suspend_fmris = smf.check_fmris("suspend_fmri", suspend_fmris, - zone=self.zone) - disable_fmris = smf.check_fmris("disable_fmri", disable_fmris, - zone=self.zone) - # eliminate services not loaded or not running - # remember those services enabled only temporarily - - for fmri in suspend_fmris.copy(): - state = smf.get_state(fmri, zone=self.zone) - if state <= smf.SMF_SVC_TMP_ENABLED: - suspend_fmris.remove(fmri) - if state == smf.SMF_SVC_TMP_ENABLED: - tmp_suspend_fmris.add(fmri) - - for fmri in disable_fmris.copy(): - if smf.is_disabled(fmri, zone=self.zone): - disable_fmris.remove(fmri) - - self.suspend_fmris = suspend_fmris - self.tmp_suspend_fmris = tmp_suspend_fmris - - params = tuple(suspend_fmris | tmp_suspend_fmris) - - if params: - self.__invoke(smf.disable, params, temporary=True) - - params = tuple(disable_fmris) - - if params: - self.__invoke(smf.disable, params) - - def exec_fail_actuators(self, image): - """handle a failed install""" - - if self.do_nothing: - return - - params = tuple(self.suspend_fmris | - self.tmp_suspend_fmris) - - if params: - self.__invoke(smf.mark, "maintenance", params) - - def exec_post_actuators(self, image): - """do post execution actuator processing""" - - if self.do_nothing: - return - - # handle callables first - - for act in six.itervalues(self.removal): - if hasattr(act, "__call__"): - act() - - for act in six.itervalues(self.install): - if hasattr(act, "__call__"): - act() - - for act in six.itervalues(self.update): - if hasattr(act, "__call__"): - act() - - - refresh_fmris = self.removal.get("refresh_fmri", set()) | \ - self.update.get("refresh_fmri", set()) | \ - self.install.get("refresh_fmri", set()) - - restart_fmris = self.removal.get("restart_fmri", set()) | \ - self.update.get("restart_fmri", set()) | \ - self.install.get("restart_fmri", set()) + """Actuators are action attributes that cause side effects + on live images when those actions are updated, installed + or removed. Since no side effects are caused when the + affected image isn't the current root image, the OS may + need to cause the equivalent effect during boot. + This is Solaris specific for now.""" + + # Each set of attributes listed below determines what attributes + # will be scanned for a given type of operation. These sets must + # match the logic found in exec_pre_actuators() and + # exec_post_actuators() below or planning output may be incorrect. 
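exec_pre_actuators() above filters the set of services it was asked to suspend: services that are not running are skipped, and services that were only temporarily enabled are remembered separately so they can later be re-enabled with the same temporary flag. A condensed sketch of that partitioning; the state constants are illustrative stand-ins for the smf module's values.

SMF_SVC_DISABLED, SMF_SVC_TMP_ENABLED, SMF_SVC_ENABLED = 0, 1, 2


def partition_suspends(fmris, get_state):
    suspend, tmp_suspend = set(), set()
    for fmri in fmris:
        state = get_state(fmri)
        if state == SMF_SVC_ENABLED:
            # Running and permanently enabled: suspend, re-enable later.
            suspend.add(fmri)
        elif state == SMF_SVC_TMP_ENABLED:
            # Only temporarily enabled: re-enable with the temporary flag.
            tmp_suspend.add(fmri)
        # Disabled or not loaded: nothing to do.
    return suspend, tmp_suspend


states = {
    "svc:/network/http:apache24": SMF_SVC_ENABLED,
    "svc:/system/scratch:default": SMF_SVC_TMP_ENABLED,
}
print(partition_suspends(states, states.get))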
+ __install_actuator_attrs = set( + [ + "release-note", # conditionally include this file + # in release notes + "refresh_fmri", # refresh this service on any change + "restart_fmri", # restart this service on any change + ] + ) + + __update_actuator_attrs = set( + [ + "reboot-needed", # have to reboot to update this file + "release-note", # conditionally include this file + # in release notes + "refresh_fmri", # refresh this service on any change + "restart_fmri", # restart this service on any change + "suspend_fmri", # suspend this service during update + ] + ) + + __removal_actuator_attrs = set( + [ + "reboot-needed", # have to reboot to update this file + "refresh_fmri", # refresh this service on any change + "restart_fmri", # restart this service on any change + "disable_fmri", # disable this service prior to removal + ] + ) + + __state__desc = { + "install": { + "disable_fmri": set(), + "reboot-needed": set(), + "refresh_fmri": set(), + "release-note": [(pkg.actions.generic.NSG, pkg.fmri.PkgFmri)], + "restart_fmri": set(), + "suspend_fmri": set(), + }, + "removal": { + "disable_fmri": set(), + "reboot-needed": set(), + "refresh_fmri": set(), + "release-note": [(pkg.actions.generic.NSG, pkg.fmri.PkgFmri)], + "restart_fmri": set(), + "suspend_fmri": set(), + }, + "update": { + "disable_fmri": set(), + "reboot-needed": set(), + "refresh_fmri": set(), + "release-note": [(pkg.actions.generic.NSG, pkg.fmri.PkgFmri)], + "restart_fmri": set(), + "suspend_fmri": set(), + }, + } + + def __init__(self): + self.install = {} + self.removal = {} + self.update = {} + self.suspend_fmris = None + self.tmp_suspend_fmris = None + self.do_nothing = True + # self.cmd_path = "" + self.sync_timeout = 0 + self.act_timed_out = False + self.zone = None + + @staticmethod + def getstate(obj, je_state=None): + """Returns the serialized state of this object in a format + that that can be easily stored using JSON, pickle, etc.""" + return pkg.misc.json_encode( + Actuator.__name__, + obj.__dict__, + Actuator.__state__desc, + je_state=je_state, + ) + + @staticmethod + def setstate(obj, state, jd_state=None): + """Update the state of this object using previously serialized + state obtained via getstate().""" + + # get the name of the object we're dealing with + name = type(obj).__name__ + + # decode serialized state into python objects + state = pkg.misc.json_decode( + name, state, Actuator.__state__desc, jd_state=jd_state + ) + + # bulk update + obj.__dict__.update(state) + + @staticmethod + def fromstate(state, jd_state=None): + """Allocate a new object using previously serialized state + obtained via getstate().""" + rv = Actuator() + Actuator.setstate(rv, state, jd_state) + return rv + + def set_timeout(self, timeout): + """Set actuator timeout. + 'timeout' Actuator timeout in seconds. The following + special values are allowed: + 0: don't use synchronous actuators + -1: no timeout, wait until finished + """ + self.sync_timeout = timeout + + def set_zone(self, zname): + """Specify if actuators are supposed to be run within a zone. + If 'zname' is None, actuators are run in the global zone, + otherwise actuators are run in the zone 'zname'. The caller has + to make sure the zone exists and is running. If there are any + issues with calling an actuator in the zone, it will be + ignored.""" + + self.zone = zname + + @property + def timed_out(self): + return self.act_timed_out + + # Defining "boolness" of a class, Python 2 uses the special method + # called __nonzero__() while Python 3 uses __bool__(). 
For Python + # 2 and 3 compatibility, define __bool__() only, and let + # __nonzero__ = __bool__ + def __bool__(self): + return bool(self.install) or bool(self.removal) or bool(self.update) + + __nonzero__ = __bool__ + + # scan_* functions take ActionPlan arguments (see imageplan.py) + def scan_install(self, ap): + self.__scan( + self.install, + ap.dst, + ap.p.destination_fmri, + self.__install_actuator_attrs, + ) + + def scan_removal(self, ap): + self.__scan( + self.removal, + ap.src, + ap.p.origin_fmri, + self.__removal_actuator_attrs, + ) + + def scan_update(self, ap): + if ap.src: + self.__scan( + self.update, + ap.src, + ap.p.destination_fmri, + self.__update_actuator_attrs, + ) + self.__scan( + self.update, + ap.dst, + ap.p.destination_fmri, + self.__update_actuator_attrs, + ) + + def __scan(self, dictionary, act, fmri, actuator_attrs): + attrs = act.attrs + for a in set(attrs.keys()) & actuator_attrs: + if a != "release-note": + values = attrs[a] + if not isinstance(values, list): + values = [values] + dictionary.setdefault(a, set()).update(values) + else: + if act.name == "file": # ignore for non-files + dictionary.setdefault(a, list()).append((act, fmri)) + + def get_list(self): + """Returns a list of actuator value pairs, suitable for printing""" + + def check_val(dfmri): + # For actuators which are a single, global function that + # needs to get executed, simply print true. + if hasattr(dfmri, "__call__") or isinstance(dfmri, list): + return ["true"] + else: + return dfmri + + merge = {} + for d in [self.removal, self.update, self.install]: + for a in d.keys(): + for smf in check_val(d[a]): + merge.setdefault(a, set()).add(smf) + + if self.reboot_needed(): + merge["reboot-needed"] = set(["true"]) + else: + merge["reboot-needed"] = set(["false"]) + return [(fmri, smf) for fmri in merge for smf in merge[fmri]] + + def get_release_note_info(self): + """Returns a list of tuples of possible release notes""" + return self.update.get("release-note", []) + self.install.get( + "release-note", [] + ) + + def get_services_list(self): + """Returns a list of services that would be restarted""" + return [ + (fmri, smf) + for fmri, smf in self.get_list() + if smf not in ["true", "false"] + ] + + def __str__(self): + return "\n".join( + " {0:>16}: {1:}".format(fmri, smf) for fmri, smf in self.get_list() + ) + + def reboot_advised(self): + """Returns True if action install execution may require a + reboot.""" + + return bool("true" in self.install.get("reboot-needed", [])) + + def reboot_needed(self): + """Returns True if action execution requires a new boot + environment.""" + + return bool("true" in self.update.get("reboot-needed", [])) or bool( + "true" in self.removal.get("reboot-needed", []) + ) + + def __invoke(self, func, *args, **kwargs): + """Execute SMF command. Remember if command timed out.""" + + if self.zone: + kwargs["zone"] = self.zone + + try: + func(*args, **kwargs) + except smf.NonzeroExitException as nze: + if nze.return_code == smf.EXIT_TIMEOUT: + self.act_timed_out = True + elif " ".join(nze.output).startswith("zlogin:"): + # Ignore zlogin errors; the worst which + # can happen is that an actuator is not run + # (disable is always run with -t). + # Since we only test once if the zone is + # runnning, this could happen if someone shuts + # down the zone while we are in the process of + # executing. 
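The except clause above, completed by the bare pass just below, gives __invoke() its error policy: remember a timeout, tolerate zlogin failures because the zone may have been shut down mid-operation, and re-raise everything else. A self-contained sketch of the same policy; CommandError and EXIT_TIMEOUT are stand-ins for smf.NonzeroExitException and smf.EXIT_TIMEOUT.

EXIT_TIMEOUT = 124  # stand-in value for smf.EXIT_TIMEOUT


class CommandError(Exception):
    def __init__(self, return_code, output):
        super().__init__(return_code, output)
        self.return_code = return_code
        self.output = output


def invoke(func, *args, **kwargs):
    timed_out = False
    try:
        func(*args, **kwargs)
    except CommandError as e:
        if e.return_code == EXIT_TIMEOUT:
            timed_out = True
        elif " ".join(e.output).startswith("zlogin:"):
            pass  # zone went away mid-operation; skip this actuator
        else:
            raise
    return timed_out


def fails_with_zlogin():
    raise CommandError(1, ["zlogin: zone 'web01' is not running"])


print(invoke(fails_with_zlogin))  # False: the error is tolerated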
+ pass + else: + raise + + def exec_prep(self, image): + if not image.is_liveroot(): + # + # XXX don't create the marker file as illumos doesn't support self-assembly + # milestone + # + # # we're doing off-line pkg ops; we need + # # to support self-assembly milestone + # # so create the necessary marker file + # + # if image.type != IMG_USER: + # path = os.path.join(image.root, + # ".SELF-ASSEMBLY-REQUIRED") + # # create only if it doesn't exist + # + # if not os.path.exists(path): + # os.close(os.open(path, + # os.O_EXCL | + # os.O_CREAT | + # os.O_WRONLY)) + if not DebugValues.get_value("smf_cmds_dir") and not self.zone: + return + + self.do_nothing = False + + def exec_pre_actuators(self, image): + """do pre execution actuator processing...""" + + if self.do_nothing: + return + + suspend_fmris = self.update.get("suspend_fmri", set()) + tmp_suspend_fmris = set() + + disable_fmris = self.removal.get("disable_fmri", set()) + + suspend_fmris = smf.check_fmris( + "suspend_fmri", suspend_fmris, zone=self.zone + ) + disable_fmris = smf.check_fmris( + "disable_fmri", disable_fmris, zone=self.zone + ) + # eliminate services not loaded or not running + # remember those services enabled only temporarily + + for fmri in suspend_fmris.copy(): + state = smf.get_state(fmri, zone=self.zone) + if state <= smf.SMF_SVC_TMP_ENABLED: + suspend_fmris.remove(fmri) + if state == smf.SMF_SVC_TMP_ENABLED: + tmp_suspend_fmris.add(fmri) + + for fmri in disable_fmris.copy(): + if smf.is_disabled(fmri, zone=self.zone): + disable_fmris.remove(fmri) + + self.suspend_fmris = suspend_fmris + self.tmp_suspend_fmris = tmp_suspend_fmris + + params = tuple(suspend_fmris | tmp_suspend_fmris) + + if params: + self.__invoke(smf.disable, params, temporary=True) + + params = tuple(disable_fmris) + + if params: + self.__invoke(smf.disable, params) + + def exec_fail_actuators(self, image): + """handle a failed install""" + + if self.do_nothing: + return + + params = tuple(self.suspend_fmris | self.tmp_suspend_fmris) + + if params: + self.__invoke(smf.mark, "maintenance", params) + + def exec_post_actuators(self, image): + """do post execution actuator processing""" + + if self.do_nothing: + return + + # handle callables first + + for act in six.itervalues(self.removal): + if hasattr(act, "__call__"): + act() + + for act in six.itervalues(self.install): + if hasattr(act, "__call__"): + act() + + for act in six.itervalues(self.update): + if hasattr(act, "__call__"): + act() + + refresh_fmris = ( + self.removal.get("refresh_fmri", set()) + | self.update.get("refresh_fmri", set()) + | self.install.get("refresh_fmri", set()) + ) + + restart_fmris = ( + self.removal.get("restart_fmri", set()) + | self.update.get("restart_fmri", set()) + | self.install.get("restart_fmri", set()) + ) - refresh_fmris = smf.check_fmris("refresh_fmri", refresh_fmris, - zone=self.zone) - restart_fmris = smf.check_fmris("restart_fmri", restart_fmris, - zone=self.zone) + refresh_fmris = smf.check_fmris( + "refresh_fmri", refresh_fmris, zone=self.zone + ) + restart_fmris = smf.check_fmris( + "restart_fmri", restart_fmris, zone=self.zone + ) - # ignore services not present or not - # enabled + # ignore services not present or not + # enabled - for fmri in refresh_fmris.copy(): - if smf.is_disabled(fmri, zone=self.zone): - refresh_fmris.remove(fmri) + for fmri in refresh_fmris.copy(): + if smf.is_disabled(fmri, zone=self.zone): + refresh_fmris.remove(fmri) - params = tuple(refresh_fmris) + params = tuple(refresh_fmris) - if params: - self.__invoke(smf.refresh, 
params, sync_timeout=self.sync_timeout) + if params: + self.__invoke(smf.refresh, params, sync_timeout=self.sync_timeout) - for fmri in restart_fmris.copy(): - if smf.is_disabled(fmri, zone=self.zone): - restart_fmris.remove(fmri) + for fmri in restart_fmris.copy(): + if smf.is_disabled(fmri, zone=self.zone): + restart_fmris.remove(fmri) - params = tuple(restart_fmris) - if params: - self.__invoke(smf.restart, params, sync_timeout=self.sync_timeout) + params = tuple(restart_fmris) + if params: + self.__invoke(smf.restart, params, sync_timeout=self.sync_timeout) - # reenable suspended services that were running - # be sure to not enable services that weren't running - # and temp. enable those services that were in that - # state. + # reenable suspended services that were running + # be sure to not enable services that weren't running + # and temp. enable those services that were in that + # state. - params = tuple(self.suspend_fmris) - if params: - self.__invoke(smf.enable, params, sync_timeout=self.sync_timeout) + params = tuple(self.suspend_fmris) + if params: + self.__invoke(smf.enable, params, sync_timeout=self.sync_timeout) - params = tuple(self.tmp_suspend_fmris) - if params: - self.__invoke(smf.enable, params, temporary=True, - sync_timeout=self.sync_timeout) + params = tuple(self.tmp_suspend_fmris) + if params: + self.__invoke( + smf.enable, + params, + temporary=True, + sync_timeout=self.sync_timeout, + ) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/api.py b/src/modules/client/api.py index d213d8ad9..33429d3a8 100644 --- a/src/modules/client/api.py +++ b/src/modules/client/api.py @@ -98,28 +98,33 @@ import pkg.search_errors as search_errors import pkg.version -from pkg.api_common import (PackageInfo, LicenseInfo, PackageCategory, - _get_pkg_cat_data) +from pkg.api_common import ( + PackageInfo, + LicenseInfo, + PackageCategory, + _get_pkg_cat_data, +) from pkg.client import global_settings from pkg.client.debugvalues import DebugValues -from pkg.client.pkgdefs import * # pylint: disable=W0401 +from pkg.client.pkgdefs import * # pylint: disable=W0401 from pkg.smf import NonzeroExitException # we import PlanDescription here even though it isn't used so that consumers # of the api still have access to the class definition and are able to do # things like help(pkg.client.api.PlanDescription) -from pkg.client.plandesc import PlanDescription # pylint: disable=W0611 +from pkg.client.plandesc import PlanDescription # pylint: disable=W0611 CURRENT_API_VERSION = 84 -COMPATIBLE_API_VERSIONS = frozenset([72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, CURRENT_API_VERSION]) +COMPATIBLE_API_VERSIONS = frozenset( + [72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, CURRENT_API_VERSION] +) CURRENT_P5I_VERSION = 1 # Image type constants. -IMG_TYPE_NONE = imgtypes.IMG_NONE # No image. -IMG_TYPE_ENTIRE = imgtypes.IMG_ENTIRE # Full image ('/'). +IMG_TYPE_NONE = imgtypes.IMG_NONE # No image. +IMG_TYPE_ENTIRE = imgtypes.IMG_ENTIRE # Full image ('/'). IMG_TYPE_PARTIAL = imgtypes.IMG_PARTIAL # Not yet implemented. -IMG_TYPE_USER = imgtypes.IMG_USER # Not '/'; some other location. +IMG_TYPE_USER = imgtypes.IMG_USER # Not '/'; some other location. # History result constants. RESULT_CANCELED = history.RESULT_CANCELED @@ -143,6007 +148,6626 @@ # Globals. logger = global_settings.logger + class _LockedGenerator(object): - """This is a private class and should not be used by API consumers. 
+ """This is a private class and should not be used by API consumers. + + This decorator class wraps API generator functions, managing the + activity and cancelation locks. Due to implementation differences + in the decorator protocol, the decorator must be used with + parenthesis in order for this to function correctly. Always + decorate functions @_LockedGenerator().""" + + def __init__(self, *d_args, **d_kwargs): + object.__init__(self) + + def __call__(self, f): + def wrapper(*fargs, **f_kwargs): + instance, fargs = fargs[0], fargs[1:] + instance._acquire_activity_lock() + instance._enable_cancel() + + clean_exit = True + canceled = False + try: + for v in f(instance, *fargs, **f_kwargs): + yield v + except GeneratorExit: + return + except apx.CanceledException: + canceled = True + raise + except Exception: + clean_exit = False + raise + finally: + if canceled: + instance._cancel_done() + elif clean_exit: + try: + instance._disable_cancel() + except apx.CanceledException: + instance._cancel_done() + instance._activity_lock.release() + raise + else: + instance._cancel_cleanup_exception() + instance._activity_lock.release() - This decorator class wraps API generator functions, managing the - activity and cancelation locks. Due to implementation differences - in the decorator protocol, the decorator must be used with - parenthesis in order for this to function correctly. Always - decorate functions @_LockedGenerator().""" + return wrapper - def __init__(self, *d_args, **d_kwargs): - object.__init__(self) - def __call__(self, f): - def wrapper(*fargs, **f_kwargs): - instance, fargs = fargs[0], fargs[1:] - instance._acquire_activity_lock() - instance._enable_cancel() +class _LockedCancelable(object): + """This is a private class and should not be used by API consumers. + + This decorator class wraps non-generator cancelable API functions, + managing the activity and cancelation locks. Due to implementation + differences in the decorator protocol, the decorator must be used with + parenthesis in order for this to function correctly. Always + decorate functions @_LockedCancelable().""" + + def __init__(self, *d_args, **d_kwargs): + object.__init__(self) + + def __call__(self, f): + def wrapper(*fargs, **f_kwargs): + instance, fargs = fargs[0], fargs[1:] + instance._acquire_activity_lock() + instance._enable_cancel() + + clean_exit = True + canceled = False + try: + return f(instance, *fargs, **f_kwargs) + except apx.CanceledException: + canceled = True + raise + except Exception: + clean_exit = False + raise + finally: + instance._img.cleanup_downloads() + try: + if int(os.environ.get("PKG_DUMP_STATS", 0)) > 0: + instance._img.transport.stats.dump() + except ValueError: + # Don't generate stats if an invalid + # value is supplied. 
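_LockedGenerator above, like _LockedCancelable whose body continues below, is a decorator class that must be applied with parentheses: instantiating it yields an object whose __call__ receives the target function and returns the wrapper that is actually installed on the class. A minimal runnable sketch of that shape, reduced to a single lock held for the lifetime of the iteration (the cancellation bookkeeping is omitted); Catalog and gen_names are made-up names.

import threading


class LockedGenerator(object):
    def __call__(self, f):
        def wrapper(instance, *args, **kwargs):
            # Hold the instance lock for as long as the caller iterates.
            instance._lock.acquire()
            try:
                for v in f(instance, *args, **kwargs):
                    yield v
            finally:
                instance._lock.release()

        return wrapper


class Catalog(object):
    def __init__(self):
        self._lock = threading.Lock()

    @LockedGenerator()
    def gen_names(self):
        yield "pkg://example/web/server"
        yield "pkg://example/library/zlib"


for name in Catalog().gen_names():
    print(name)

Because the wrapper is itself a generator function, the finally clause runs whether the caller exhausts the iterator or closes it early.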
+ pass + + if canceled: + instance._cancel_done() + elif clean_exit: + try: + instance._disable_cancel() + except apx.CanceledException: + instance._cancel_done() + # if f() acquired the image + # lock, drop it + if instance._img.locked: + instance._img.unlock() + instance._activity_lock.release() + raise + else: + instance._cancel_cleanup_exception() + # if f() acquired the image lock, drop it + if instance._img.locked: + instance._img.unlock() + instance._activity_lock.release() - clean_exit = True - canceled = False - try: - for v in f(instance, *fargs, **f_kwargs): - yield v - except GeneratorExit: - return - except apx.CanceledException: - canceled = True - raise - except Exception: - clean_exit = False - raise - finally: - if canceled: - instance._cancel_done() - elif clean_exit: - try: - instance._disable_cancel() - except apx.CanceledException: - instance._cancel_done() - instance._activity_lock.release() - raise - else: - instance._cancel_cleanup_exception() - instance._activity_lock.release() - - return wrapper + return wrapper -class _LockedCancelable(object): - """This is a private class and should not be used by API consumers. +class ImageInterface(object): + """This class presents an interface to images that clients may use. + There is a specific order of methods which must be used to install + or uninstall packages, or update an image. First, a gen_plan_* method + must be called. After that method completes successfully, describe may + be called, and prepare must be called. Finally, execute_plan may be + called to implement the previous created plan. The other methods + do not have an ordering imposed upon them, and may be used as + needed. Cancel may only be invoked while a cancelable method is + running.""" + + FACET_ALL = 0 + FACET_IMAGE = 1 + FACET_INSTALLED = 2 + + FACET_SRC_SYSTEM = pkg.facet.Facets.FACET_SRC_SYSTEM + FACET_SRC_LOCAL = pkg.facet.Facets.FACET_SRC_LOCAL + FACET_SRC_PARENT = pkg.facet.Facets.FACET_SRC_PARENT + + # Constants used to reference specific values that info can return. + INFO_FOUND = 0 + INFO_MISSING = 1 + INFO_ILLEGALS = 3 + + LIST_ALL = 0 + LIST_INSTALLED = 1 + LIST_INSTALLED_NEWEST = 2 + LIST_NEWEST = 3 + LIST_UPGRADABLE = 4 + LIST_REMOVABLE = 5 + + MATCH_EXACT = 0 + MATCH_FMRI = 1 + MATCH_GLOB = 2 + + VARIANT_ALL = 0 + VARIANT_ALL_POSSIBLE = 1 + VARIANT_IMAGE = 2 + VARIANT_IMAGE_POSSIBLE = 3 + VARIANT_INSTALLED = 4 + VARIANT_INSTALLED_POSSIBLE = 5 + + def __init__( + self, + img_path, + version_id, + progresstracker, + cancel_state_callable, + pkg_client_name, + exact_match=True, + cmdpath=None, + ): + """Constructs an ImageInterface object. + + 'img_path' is the absolute path to an existing image or to a + path from which to start looking for an image. To control this + behaviour use the 'exact_match' parameter. - This decorator class wraps non-generator cancelable API functions, - managing the activity and cancelation locks. Due to implementation - differences in the decorator protocol, the decorator must be used with - parenthesis in order for this to function correctly. Always - decorate functions @_LockedCancelable().""" + 'version_id' indicates the version of the api the client is + expecting to use. - def __init__(self, *d_args, **d_kwargs): - object.__init__(self) + 'progresstracker' is the ProgressTracker object the client wants + the api to use for UI progress callbacks. 
- def __call__(self, f): - def wrapper(*fargs, **f_kwargs): - instance, fargs = fargs[0], fargs[1:] - instance._acquire_activity_lock() - instance._enable_cancel() + 'cancel_state_callable' is an optional function reference that + will be called if the cancellable status of an operation + changes. - clean_exit = True - canceled = False - try: - return f(instance, *fargs, **f_kwargs) - except apx.CanceledException: - canceled = True - raise - except Exception: - clean_exit = False - raise - finally: - instance._img.cleanup_downloads() - try: - if int(os.environ.get("PKG_DUMP_STATS", - 0)) > 0: - instance._img.transport.stats.dump() - except ValueError: - # Don't generate stats if an invalid - # value is supplied. - pass - - if canceled: - instance._cancel_done() - elif clean_exit: - try: - instance._disable_cancel() - except apx.CanceledException: - instance._cancel_done() - # if f() acquired the image - # lock, drop it - if instance._img.locked: - instance._img.unlock() - instance._activity_lock.release() - raise - else: - instance._cancel_cleanup_exception() - # if f() acquired the image lock, drop it - if instance._img.locked: - instance._img.unlock() - instance._activity_lock.release() - - return wrapper + 'pkg_client_name' is a string containing the name of the client, + such as "pkg". + 'exact_match' is a boolean indicating whether the API should + attempt to find a usable image starting from the specified + directory, going up to the filesystem root until it finds one. + If set to True, an image must exist at the location indicated + by 'img_path'. + """ -class ImageInterface(object): - """This class presents an interface to images that clients may use. - There is a specific order of methods which must be used to install - or uninstall packages, or update an image. First, a gen_plan_* method - must be called. After that method completes successfully, describe may - be called, and prepare must be called. Finally, execute_plan may be - called to implement the previous created plan. The other methods - do not have an ordering imposed upon them, and may be used as - needed. Cancel may only be invoked while a cancelable method is - running.""" - - FACET_ALL = 0 - FACET_IMAGE = 1 - FACET_INSTALLED = 2 - - FACET_SRC_SYSTEM = pkg.facet.Facets.FACET_SRC_SYSTEM - FACET_SRC_LOCAL = pkg.facet.Facets.FACET_SRC_LOCAL - FACET_SRC_PARENT = pkg.facet.Facets.FACET_SRC_PARENT - - # Constants used to reference specific values that info can return. - INFO_FOUND = 0 - INFO_MISSING = 1 - INFO_ILLEGALS = 3 - - LIST_ALL = 0 - LIST_INSTALLED = 1 - LIST_INSTALLED_NEWEST = 2 - LIST_NEWEST = 3 - LIST_UPGRADABLE = 4 - LIST_REMOVABLE = 5 - - MATCH_EXACT = 0 - MATCH_FMRI = 1 - MATCH_GLOB = 2 - - VARIANT_ALL = 0 - VARIANT_ALL_POSSIBLE = 1 - VARIANT_IMAGE = 2 - VARIANT_IMAGE_POSSIBLE = 3 - VARIANT_INSTALLED = 4 - VARIANT_INSTALLED_POSSIBLE = 5 - - def __init__(self, img_path, version_id, progresstracker, - cancel_state_callable, pkg_client_name, exact_match=True, - cmdpath=None): - """Constructs an ImageInterface object. - - 'img_path' is the absolute path to an existing image or to a - path from which to start looking for an image. To control this - behaviour use the 'exact_match' parameter. - - 'version_id' indicates the version of the api the client is - expecting to use. - - 'progresstracker' is the ProgressTracker object the client wants - the api to use for UI progress callbacks. 
- - 'cancel_state_callable' is an optional function reference that - will be called if the cancellable status of an operation - changes. - - 'pkg_client_name' is a string containing the name of the client, - such as "pkg". - - 'exact_match' is a boolean indicating whether the API should - attempt to find a usable image starting from the specified - directory, going up to the filesystem root until it finds one. - If set to True, an image must exist at the location indicated - by 'img_path'. + if version_id not in COMPATIBLE_API_VERSIONS: + raise apx.VersionException(CURRENT_API_VERSION, version_id) + + if sys.path[0].startswith("/dev/fd/"): + # + # Normally when the kernel forks off an interpreted + # program, it executes the interpreter with the first + # argument being the path to the interpreted program + # we're executing. But in the case of suid scripts + # this presents a security problem because that path + # could be updated after exec but before the + # interpreter opens reads the program. To avoid this + # race, for suid script the kernel replaces the name + # of the interpreted program with /dev/fd/###, and + # opens the interpreted program such that it can be + # read from the specified file descriptor device node. + # So if we detect that path[0] (which should be then + # interpreted program name) is a /dev/fd/ path, that + # means we're being run as an suid script, which we + # don't really want to support. (Since this breaks + # our subsequent code that attempt to determine the + # name of the executable we are running as.) + # + raise apx.SuidUnsupportedError() + + # The image's History object will use client_name from + # global_settings, but if the program forgot to set it, + # we'll go ahead and do so here. + if global_settings.client_name is None: + global_settings.client_name = pkg_client_name + + if cmdpath is None: + cmdpath = misc.api_cmdpath() + self.cmdpath = cmdpath + + # prevent brokeness in the test suite + if ( + self.cmdpath + and "PKG_NO_RUNPY_CMDPATH" in os.environ + and self.cmdpath.endswith(os.sep + "run.py") + ): + raise RuntimeError( """ - - if version_id not in COMPATIBLE_API_VERSIONS: - raise apx.VersionException(CURRENT_API_VERSION, - version_id) - - if sys.path[0].startswith("/dev/fd/"): - # - # Normally when the kernel forks off an interpreted - # program, it executes the interpreter with the first - # argument being the path to the interpreted program - # we're executing. But in the case of suid scripts - # this presents a security problem because that path - # could be updated after exec but before the - # interpreter opens reads the program. To avoid this - # race, for suid script the kernel replaces the name - # of the interpreted program with /dev/fd/###, and - # opens the interpreted program such that it can be - # read from the specified file descriptor device node. - # So if we detect that path[0] (which should be then - # interpreted program name) is a /dev/fd/ path, that - # means we're being run as an suid script, which we - # don't really want to support. (Since this breaks - # our subsequent code that attempt to determine the - # name of the executable we are running as.) - # - raise apx.SuidUnsupportedError() - - # The image's History object will use client_name from - # global_settings, but if the program forgot to set it, - # we'll go ahead and do so here. 
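The comment above explains the suid-script detection: when the kernel executes a setuid interpreter script it substitutes a /dev/fd/NN path for the script name, so a sys.path[0] that begins with /dev/fd/ means the client is running in a mode it does not support. The check itself reduces to a prefix test; RuntimeError here is only a stand-in for the apx.SuidUnsupportedError raised by the real code.

import sys


def running_as_suid_script():
    # The kernel replaces the script path with /dev/fd/NN for setuid scripts.
    return sys.path[0].startswith("/dev/fd/")


if running_as_suid_script():
    raise RuntimeError("setuid interpreter scripts are not supported")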
- if global_settings.client_name is None: - global_settings.client_name = pkg_client_name - - if cmdpath is None: - cmdpath = misc.api_cmdpath() - self.cmdpath = cmdpath - - # prevent brokeness in the test suite - if self.cmdpath and \ - "PKG_NO_RUNPY_CMDPATH" in os.environ and \ - self.cmdpath.endswith(os.sep + "run.py"): - raise RuntimeError(""" An ImageInterface object was allocated from within ipkg test suite and cmdpath was not explicitly overridden. Please make sure to set explicitly set cmdpath when allocating an ImageInterface object, or override cmdpath when allocating an Image object by setting PKG_CMDPATH -in the environment or by setting simulate_cmdpath in DebugValues.""") - - if isinstance(img_path, six.string_types): - # Store this for reset(). - self._img_path = img_path - self._img = image.Image(img_path, - progtrack=progresstracker, - user_provided_dir=exact_match, - cmdpath=self.cmdpath) - - # Store final image path. - self._img_path = self._img.get_root() - elif isinstance(img_path, image.Image): - # This is a temporary, special case for client.py - # until the image api is complete. - self._img = img_path - self._img_path = img_path.get_root() - else: - # API consumer passed an unknown type for img_path. - raise TypeError(_("Unknown img_path type.")) - - self.__progresstracker = progresstracker - lin = None - if self._img.linked.ischild(): - lin = self._img.linked.child_name - self.__progresstracker.set_linked_name(lin) - - self.__cancel_state_callable = cancel_state_callable - self.__plan_type = None - self.__api_op = None - self.__plan_desc = None - self.__planned_children = False - self.__prepared = False - self.__executed = False - self.__be_activate = True - self.__backup_be_name = None - self.__be_name = None - self.__can_be_canceled = False - self.__canceling = False - self._activity_lock = pkg.nrlock.NRLock() - self.__blocking_locks = False - self._img.blocking_locks = self.__blocking_locks - self.__cancel_lock = pkg.nrlock.NRLock() - self.__cancel_cv = threading.Condition(self.__cancel_lock) - self.__backup_be = None # create if needed - self.__new_be = None # create if needed - self.__alt_sources = {} - - def __set_blocking_locks(self, value): - self._activity_lock.acquire() - self.__blocking_locks = value - self._img.blocking_locks = value - self._activity_lock.release() - - def __set_img_alt_sources(self, repos): - """Private helper function to change image to use alternate - package sources if applicable.""" - - # When using alternate package sources with the image, the - # result is a composite of the package data already known - # by the image and the alternate sources. - if repos: - self._img.set_alt_pkg_sources( - self.__get_alt_pkg_data(repos)) - else: - self._img.set_alt_pkg_sources(None) - - @_LockedCancelable() - def set_alt_repos(self, repos): - """Public function to specify alternate package sources.""" - self.__set_img_alt_sources(repos) - - blocking_locks = property(lambda self: self.__blocking_locks, - __set_blocking_locks, doc="A boolean value indicating whether " - "the API should wait until the image interface can be locked if " - "it is in use by another thread or process. 
Clients should be " - "aware that there is no timeout mechanism in place if blocking is " - "enabled, and so should take steps to remain responsive to user " - "input or provide a way for users to cancel operations.") - - @property - def excludes(self): - """The list of excludes for the image.""" - return self._img.list_excludes() - - @property - def img(self): - """Private; public access to this property will be removed at - a later date. Do not use.""" - return self._img +in the environment or by setting simulate_cmdpath in DebugValues.""" + ) + + if isinstance(img_path, six.string_types): + # Store this for reset(). + self._img_path = img_path + self._img = image.Image( + img_path, + progtrack=progresstracker, + user_provided_dir=exact_match, + cmdpath=self.cmdpath, + ) + + # Store final image path. + self._img_path = self._img.get_root() + elif isinstance(img_path, image.Image): + # This is a temporary, special case for client.py + # until the image api is complete. + self._img = img_path + self._img_path = img_path.get_root() + else: + # API consumer passed an unknown type for img_path. + raise TypeError(_("Unknown img_path type.")) + + self.__progresstracker = progresstracker + lin = None + if self._img.linked.ischild(): + lin = self._img.linked.child_name + self.__progresstracker.set_linked_name(lin) + + self.__cancel_state_callable = cancel_state_callable + self.__plan_type = None + self.__api_op = None + self.__plan_desc = None + self.__planned_children = False + self.__prepared = False + self.__executed = False + self.__be_activate = True + self.__backup_be_name = None + self.__be_name = None + self.__can_be_canceled = False + self.__canceling = False + self._activity_lock = pkg.nrlock.NRLock() + self.__blocking_locks = False + self._img.blocking_locks = self.__blocking_locks + self.__cancel_lock = pkg.nrlock.NRLock() + self.__cancel_cv = threading.Condition(self.__cancel_lock) + self.__backup_be = None # create if needed + self.__new_be = None # create if needed + self.__alt_sources = {} + + def __set_blocking_locks(self, value): + self._activity_lock.acquire() + self.__blocking_locks = value + self._img.blocking_locks = value + self._activity_lock.release() + + def __set_img_alt_sources(self, repos): + """Private helper function to change image to use alternate + package sources if applicable.""" + + # When using alternate package sources with the image, the + # result is a composite of the package data already known + # by the image and the alternate sources. + if repos: + self._img.set_alt_pkg_sources(self.__get_alt_pkg_data(repos)) + else: + self._img.set_alt_pkg_sources(None) + + @_LockedCancelable() + def set_alt_repos(self, repos): + """Public function to specify alternate package sources.""" + self.__set_img_alt_sources(repos) + + blocking_locks = property( + lambda self: self.__blocking_locks, + __set_blocking_locks, + doc="A boolean value indicating whether " + "the API should wait until the image interface can be locked if " + "it is in use by another thread or process. Clients should be " + "aware that there is no timeout mechanism in place if blocking is " + "enabled, and so should take steps to remain responsive to user " + "input or provide a way for users to cancel operations.", + ) + + @property + def excludes(self): + """The list of excludes for the image.""" + return self._img.list_excludes() + + @property + def img(self): + """Private; public access to this property will be removed at + a later date. 
Do not use.""" + return self._img + + @property + def img_type(self): + """Returns the IMG_TYPE constant for the image's type.""" + if not self._img: + return None + return self._img.image_type(self._img.root) + + @property + def is_liveroot(self): + """A boolean indicating whether the image to be modified is + for the live system root.""" + return self._img.is_liveroot() + + @property + def is_zone(self): + """A boolean value indicating whether the image is a zone.""" + return self._img.is_zone() + + @property + def is_active_liveroot_be(self): + """A boolean indicating whether the image to be modified is + the active BE for the system's root image.""" + + if not self._img.is_liveroot(): + return False - @property - def img_type(self): - """Returns the IMG_TYPE constant for the image's type.""" - if not self._img: - return None - return self._img.image_type(self._img.root) - - @property - def is_liveroot(self): - """A boolean indicating whether the image to be modified is - for the live system root.""" - return self._img.is_liveroot() - - @property - def is_zone(self): - """A boolean value indicating whether the image is a zone.""" - return self._img.is_zone() - - @property - def is_active_liveroot_be(self): - """A boolean indicating whether the image to be modified is - the active BE for the system's root image.""" - - if not self._img.is_liveroot(): - return False + try: + be_name, be_uuid = bootenv.BootEnv.get_be_name(self._img.root) + return be_name == bootenv.BootEnv.get_activated_be_name( + bootnext=True + ) + except apx.BEException: + # If boot environment logic isn't supported, return + # False. This is necessary for user images and for + # the test suite. + return False + + @property + def img_plandir(self): + """A path to the image planning directory.""" + plandir = self._img.plandir + misc.makedirs(plandir) + return plandir + + @property + def last_modified(self): + """A datetime object representing when the image's metadata was + last updated.""" + + return self._img.get_last_modified() + + def __set_progresstracker(self, value): + self._activity_lock.acquire() + self.__progresstracker = value + + # tell the progress tracker about this image's name + lin = None + if self._img.linked.ischild(): + lin = self._img.linked.child_name + self.__progresstracker.set_linked_name(lin) + + self._activity_lock.release() + + progresstracker = property( + lambda self: self.__progresstracker, + __set_progresstracker, + doc="The current ProgressTracker object. " + "This value should only be set when no other API calls are in " + "progress.", + ) + + @property + def mediators(self): + """A dictionary of the mediators and their configured version + and implementation of the form: + + { + mediator-name: { + "version": mediator-version-string, + "version-source": (site|vendor|system|local), + "implementation": mediator-implementation-string, + "implementation-source": (site|vendor|system|local), + } + } + + 'version' is an optional string that specifies the version + (expressed as a dot-separated sequence of non-negative + integers) of the mediator for use. + + 'version-source' is a string describing the source of the + selected version configuration. It indicates how the + version component of the mediation was selected. + + 'implementation' is an optional string that specifies the + implementation of the mediator for use in addition to or + instead of 'version'. + + 'implementation-source' is a string describing the source of + the selected implementation configuration. 
It indicates how + the implementation component of the mediation was selected. + """ - try: - be_name, be_uuid = bootenv.BootEnv.get_be_name( - self._img.root) - return be_name == \ - bootenv.BootEnv.get_activated_be_name(bootnext=True) - except apx.BEException: - # If boot environment logic isn't supported, return - # False. This is necessary for user images and for - # the test suite. - return False - - @property - def img_plandir(self): - """A path to the image planning directory.""" - plandir = self._img.plandir - misc.makedirs(plandir) - return plandir - - @property - def last_modified(self): - """A datetime object representing when the image's metadata was - last updated.""" - - return self._img.get_last_modified() - - def __set_progresstracker(self, value): - self._activity_lock.acquire() - self.__progresstracker = value - - # tell the progress tracker about this image's name - lin = None - if self._img.linked.ischild(): - lin = self._img.linked.child_name - self.__progresstracker.set_linked_name(lin) - - self._activity_lock.release() - - progresstracker = property(lambda self: self.__progresstracker, - __set_progresstracker, doc="The current ProgressTracker object. " - "This value should only be set when no other API calls are in " - "progress.") - - @property - def mediators(self): - """A dictionary of the mediators and their configured version - and implementation of the form: - - { - mediator-name: { - "version": mediator-version-string, - "version-source": (site|vendor|system|local), - "implementation": mediator-implementation-string, - "implementation-source": (site|vendor|system|local), - } - } - - 'version' is an optional string that specifies the version - (expressed as a dot-separated sequence of non-negative - integers) of the mediator for use. - - 'version-source' is a string describing the source of the - selected version configuration. It indicates how the - version component of the mediation was selected. - - 'implementation' is an optional string that specifies the - implementation of the mediator for use in addition to or - instead of 'version'. - - 'implementation-source' is a string describing the source of - the selected implementation configuration. It indicates how - the implementation component of the mediation was selected. - """ - - ret = {} - for m, mvalues in six.iteritems(self._img.cfg.mediators): - ret[m] = copy.copy(mvalues) - if "version" in ret[m]: - # Don't expose internal Version object to - # external consumers. - ret[m]["version"] = \ - ret[m]["version"].get_short_version() - if "implementation-version" in ret[m]: - # Don't expose internal Version object to - # external consumers. - ret[m]["implementation-version"] = \ - ret[m]["implementation-version"].get_short_version() - return ret - - @property - def root(self): - """The absolute pathname of the filesystem root of the image. - This property is read-only.""" - if not self._img: - return None - return self._img.root - - @staticmethod - def check_be_name(be_name): - bootenv.BootEnv.check_be_name(be_name) - return True + ret = {} + for m, mvalues in six.iteritems(self._img.cfg.mediators): + ret[m] = copy.copy(mvalues) + if "version" in ret[m]: + # Don't expose internal Version object to + # external consumers. + ret[m]["version"] = ret[m]["version"].get_short_version() + if "implementation-version" in ret[m]: + # Don't expose internal Version object to + # external consumers. 
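The loop above copies each configured mediation and replaces the internal Version objects with their short string form before the mapping is handed to API consumers. A stand-alone sketch of that normalisation step; ShortVersion is a stand-in for the internal version type.

import copy


class ShortVersion:
    def __init__(self, s):
        self.s = s

    def get_short_version(self):
        return self.s


def export_mediators(cfg_mediators):
    ret = {}
    for name, values in cfg_mediators.items():
        # Shallow copy so replacing "version" below does not modify the
        # stored configuration.
        ret[name] = copy.copy(values)
        if "version" in ret[name]:
            ret[name]["version"] = ret[name]["version"].get_short_version()
    return ret


print(
    export_mediators(
        {"python": {"version": ShortVersion("3.11"), "version-source": "system"}}
    )
)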
+ ret[m]["implementation-version"] = ret[m][ + "implementation-version" + ].get_short_version() + return ret + + @property + def root(self): + """The absolute pathname of the filesystem root of the image. + This property is read-only.""" + if not self._img: + return None + return self._img.root + + @staticmethod + def check_be_name(be_name): + bootenv.BootEnv.check_be_name(be_name) + return True + + def __cert_verify(self, log_op_end=None): + """Verify validity of certificates. Any apx.ExpiringCertificate + exceptions are caught here, a message is displayed, and + execution continues. + + All other exceptions will be passed to the calling context. + The caller can also set log_op_end to a list of exceptions + that should result in a call to self.log_operation_end() + before the exception is passed on. + """ - def __cert_verify(self, log_op_end=None): - """Verify validity of certificates. Any apx.ExpiringCertificate - exceptions are caught here, a message is displayed, and - execution continues. + if log_op_end is None: + log_op_end = [] - All other exceptions will be passed to the calling context. - The caller can also set log_op_end to a list of exceptions - that should result in a call to self.log_operation_end() - before the exception is passed on. - """ + # we always explicitly handle apx.ExpiringCertificate + assert apx.ExpiringCertificate not in log_op_end - if log_op_end is None: - log_op_end = [] + try: + self._img.check_cert_validity() + except apx.ExpiringCertificate as e: + logger.warning(e) + except: + exc_type, exc_value, exc_traceback = sys.exc_info() + if exc_type in log_op_end: + self.log_operation_end(error=exc_value) + raise + + def __refresh_publishers(self): + """Refresh publisher metadata; this should only be used by + functions in this module for implicit refresh cases.""" + + # + # Verify validity of certificates before possibly + # attempting network operations. + # + self.__cert_verify() + try: + self._img.refresh_publishers( + immediate=True, progtrack=self.__progresstracker + ) + except apx.ImageFormatUpdateNeeded: + # If image format update is needed to perform refresh, + # continue on and allow failure to happen later since + # an implicit refresh failing for this reason isn't + # important. (This allows planning installs and updates + # before the format of an image is updated. Yes, this + # means that if the refresh was needed to do that, then + # this isn't useful, but this is as good as it gets.) + logger.warning( + _( + "Skipping publisher metadata refresh;" + "image rooted at {0} must have its format updated " + "before a refresh can occur." + ).format(self._img.root) + ) + + def _acquire_activity_lock(self): + """Private helper method to aqcuire activity lock.""" + + rc = self._activity_lock.acquire(blocking=self.__blocking_locks) + if not rc: + raise apx.ImageLockedError() + + def __plan_common_start( + self, + operation, + noexecute, + backup_be, + backup_be_name, + new_be, + be_name, + be_activate, + ): + """Start planning an operation: + Acquire locks. + Log the start of the operation. 
+ Check be_name.""" + + self._acquire_activity_lock() + try: + self._enable_cancel() + if self.__plan_type is not None: + raise apx.PlanExistsException(self.__plan_type) + self._img.lock(allow_unprivileged=noexecute) + except OSError as e: + self._cancel_cleanup_exception() + self._activity_lock.release() + if e.errno in (errno.ENOSPC, errno.EDQUOT): + raise apx.ImageLockingFailedError(self._img_path, e.strerror) + raise + except: + self._cancel_cleanup_exception() + self._activity_lock.release() + raise + + assert self._activity_lock._is_owned() + self.log_operation_start(operation) + self.__backup_be = backup_be + self.__backup_be_name = backup_be_name + self.__new_be = new_be + self.__be_activate = be_activate + self.__be_name = be_name + for val in (self.__be_name, self.__backup_be_name): + if val is not None: + self.check_be_name(val) + if not self._img.is_liveroot(): + self._cancel_cleanup_exception() + self._activity_lock.release() + self._img.unlock() + raise apx.BENameGivenOnDeadBE(val) - # we always explicitly handle apx.ExpiringCertificate - assert apx.ExpiringCertificate not in log_op_end + def __plan_common_finish(self): + """Finish planning an operation.""" - try: - self._img.check_cert_validity() - except apx.ExpiringCertificate as e: - logger.warning(e) - except: - exc_type, exc_value, exc_traceback = sys.exc_info() - if exc_type in log_op_end: - self.log_operation_end(error=exc_value) - raise + assert self._activity_lock._is_owned() + self._img.cleanup_downloads() + self._img.unlock() + try: + if int(os.environ.get("PKG_DUMP_STATS", 0)) > 0: + self._img.transport.stats.dump() + except ValueError: + # Don't generate stats if an invalid value + # is supplied. + pass - def __refresh_publishers(self): - """Refresh publisher metadata; this should only be used by - functions in this module for implicit refresh cases.""" + self._activity_lock.release() - # - # Verify validity of certificates before possibly - # attempting network operations. - # - self.__cert_verify() - try: - self._img.refresh_publishers(immediate=True, - progtrack=self.__progresstracker) - except apx.ImageFormatUpdateNeeded: - # If image format update is needed to perform refresh, - # continue on and allow failure to happen later since - # an implicit refresh failing for this reason isn't - # important. (This allows planning installs and updates - # before the format of an image is updated. Yes, this - # means that if the refresh was needed to do that, then - # this isn't useful, but this is as good as it gets.) - logger.warning(_("Skipping publisher metadata refresh;" - "image rooted at {0} must have its format updated " - "before a refresh can occur.").format(self._img.root)) - - def _acquire_activity_lock(self): - """Private helper method to aqcuire activity lock.""" - - rc = self._activity_lock.acquire( - blocking=self.__blocking_locks) - if not rc: - raise apx.ImageLockedError() - - def __plan_common_start(self, operation, noexecute, backup_be, - backup_be_name, new_be, be_name, be_activate): - """Start planning an operation: - Acquire locks. - Log the start of the operation. 
- Check be_name.""" - - self._acquire_activity_lock() - try: - self._enable_cancel() - if self.__plan_type is not None: - raise apx.PlanExistsException( - self.__plan_type) - self._img.lock(allow_unprivileged=noexecute) - except OSError as e: - self._cancel_cleanup_exception() - self._activity_lock.release() - if e.errno in (errno.ENOSPC, errno.EDQUOT): - raise apx.ImageLockingFailedError( - self._img_path, e.strerror) - raise - except: - self._cancel_cleanup_exception() - self._activity_lock.release() - raise + def __auto_be_name(self): + try: + be_template = self._img.cfg.get_property( + "property", imgcfg.AUTO_BE_NAME + ) + except: + be_template = None + + if not be_template or len(be_template) == 0: + return + + if be_template.startswith(AUTO_BE_NAME_TIME_PREFIX): + try: + be_template = time.strftime( + be_template[len(AUTO_BE_NAME_TIME_PREFIX) :] + ) + except: + return + else: + release = date = None + # Check to see if release/name is being updated + for src, dest in self._img.imageplan.plan_desc: + if not dest or dest.get_name() != "release/name": + continue + # It is, extract attributes + for a in self._img.imageplan.pd.update_actions: + if not isinstance(a.dst, actions.attribute.AttributeAction): + continue + name = a.dst.attrs["name"] + if name == "ooce.release": + release = a.dst.attrs["value"] + elif name == "ooce.release.build": + date = a.dst.attrs["value"] + if release and date: + break + break + + if not release and not date: + # No variables changed in this update + return + + if "%r" in be_template and not release: + return + if "%d" in be_template and not date: + return + if "%D" in be_template and not date: + return + if release: + be_template = be_template.replace("%r", release) + if date: + be_template = be_template.replace("%d", date) + be_template = be_template.replace("%D", date.replace(".", "")) + + if not be_template or len(be_template) == 0: + return + + be = bootenv.BootEnv(self._img) + self.__be_name = be.get_new_be_name(new_bename=be_template) + + def __set_be_creation(self): + """Figure out whether or not we'd create a new or backup boot + environment given inputs and plan. Toss cookies if we need a + new be and can't have one.""" + + if not self._img.is_liveroot(): + self.__backup_be = False + self.__new_be = False + return + + if self.__new_be is None: + # If image policy requires a new BE or the plan requires + # it, then create a new BE. + self.__new_be = ( + self._img.cfg.get_policy_str(imgcfg.BE_POLICY) == "always-new" + or self._img.imageplan.reboot_needed() + ) + elif self.__new_be is False and self._img.imageplan.reboot_needed(): + raise apx.ImageUpdateOnLiveImageException() + + # If a new BE is required and no BE name has been provided + # on the command line, attempt to determine a BE name + # automatically. + if self.__new_be == True and self.__be_name == None: + self.__auto_be_name() + + if not self.__new_be and self.__backup_be is None: + # Create a backup be if allowed by policy (note that the + # 'default' policy is currently an alias for + # 'create-backup') ... + allow_backup = self._img.cfg.get_policy_str(imgcfg.BE_POLICY) in ( + "default", + "create-backup", + ) + + self.__backup_be = False + if allow_backup: + # ...when packages are being + # updated... + for src, dest in self._img.imageplan.plan_desc: + if src and dest: + self.__backup_be = True + break + if allow_backup and not self.__backup_be: + # ...or if new packages that have + # reboot-needed=true are being + # installed. 
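The template expansion in __auto_be_name() above is easier to see in isolation. This is a self-contained sketch of just the substitution logic; the time-prefix value and the release/date inputs are invented for illustration (the real prefix constant is AUTO_BE_NAME_TIME_PREFIX, defined elsewhere in the module):

import time

TIME_PREFIX = "time:"    # illustrative stand-in for AUTO_BE_NAME_TIME_PREFIX

def expand_be_template(template, release=None, date=None):
    if template.startswith(TIME_PREFIX):
        return time.strftime(template[len(TIME_PREFIX):])
    if "%r" in template and not release:
        return None                     # required substitution unavailable
    if ("%d" in template or "%D" in template) and not date:
        return None
    if release:
        template = template.replace("%r", release)
    if date:
        template = template.replace("%d", date)
        template = template.replace("%D", date.replace(".", ""))
    return template or None

print(expand_be_template("omnios-%r-%D", release="r151048", date="2023.10.05"))
# omnios-r151048-20231005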
+ self.__backup_be = self._img.imageplan.reboot_advised() + + def abort(self, result=RESULT_FAILED_UNKNOWN): + """Indicate that execution was unexpectedly aborted and log + operation failure if possible.""" + try: + # This can raise if, for example, we're aborting + # because we have a PipeError and we can no longer + # write. So suppress problems here. + if self.__progresstracker: + self.__progresstracker.flush() + except: + pass - assert self._activity_lock._is_owned() - self.log_operation_start(operation) - self.__backup_be = backup_be - self.__backup_be_name = backup_be_name - self.__new_be = new_be - self.__be_activate = be_activate - self.__be_name = be_name - for val in (self.__be_name, self.__backup_be_name): - if val is not None: - self.check_be_name(val) - if not self._img.is_liveroot(): - self._cancel_cleanup_exception() - self._activity_lock.release() - self._img.unlock() - raise apx.BENameGivenOnDeadBE(val) - - def __plan_common_finish(self): - """Finish planning an operation.""" - - assert self._activity_lock._is_owned() - self._img.cleanup_downloads() - self._img.unlock() - try: - if int(os.environ.get("PKG_DUMP_STATS", 0)) > 0: - self._img.transport.stats.dump() - except ValueError: - # Don't generate stats if an invalid value - # is supplied. - pass + self._img.history.abort(result) - self._activity_lock.release() + def avoid_pkgs(self, fmri_strings, unavoid=False): + """Avoid/Unavoid one or more packages. It is an error to + avoid an installed package, or unavoid one that would + be installed.""" - def __auto_be_name(self): - try: - be_template = self._img.cfg.get_property( - 'property', imgcfg.AUTO_BE_NAME) - except: - be_template = None + self._acquire_activity_lock() + try: + if not unavoid: + self._img.avoid_pkgs( + fmri_strings, + progtrack=self.__progresstracker, + check_cancel=self.__check_cancel, + ) + else: + self._img.unavoid_pkgs( + fmri_strings, + progtrack=self.__progresstracker, + check_cancel=self.__check_cancel, + ) + finally: + self._activity_lock.release() + return True + + def flag_pkgs(self, fmri_strings, flag, value): + if flag == "manual": + state = PackageInfo.MANUAL + else: + raise apx.InvalidOptionErrors("Unknown flag") + + pfmris = [] + for pfmri, _, _, _, _ in self.get_pkg_list( + pkg_list=self.LIST_INSTALLED, + patterns=fmri_strings, + raise_unmatched=True, + return_fmris=True, + ): + pfmris.append(pfmri) + + self._acquire_activity_lock() - if not be_template or len(be_template) == 0: - return + try: + self._img.flag_pkgs( + pfmris=pfmris, + state=state, + value=value, + progtrack=self.__progresstracker, + ) + finally: + self._activity_lock.release() + return True + + def gen_available_mediators(self): + """A generator function that yields tuples of the form + (mediator, mediations), where mediator is the name of the + provided mediation and mediations is a list of dictionaries + of possible mediations to set, provided by installed + packages, of the form: + + { + mediator-name: { + "version": mediator-version-string, + "version-source": (site|vendor|system|local), + "implementation": mediator-implementation-string, + "implementation-source": (site|vendor|system|local), + } + } + + 'version' is an optional string that specifies the version + (expressed as a dot-separated sequence of non-negative + integers) of the mediator for use. + + 'version-source' is a string describing how the version + component of the mediation will be evaluated during + mediation. (The priority.) 
+ + 'implementation' is an optional string that specifies the + implementation of the mediator for use in addition to or + instead of 'version'. + + 'implementation-source' is a string describing how the + implementation component of the mediation will be evaluated + during mediation. (The priority.) + + The list of possible mediations returned for each mediator is + ordered by source in the sequence 'site', 'vendor', 'system', + and then by version and implementation. It does not include + mediations that exist only in the image configuration. + """ - if be_template.startswith(AUTO_BE_NAME_TIME_PREFIX): + ret = collections.defaultdict(set) + excludes = self._img.list_excludes() + for f in self._img.gen_installed_pkgs(): + mfst = self._img.get_manifest(f) + for m, mediations in mfst.gen_mediators(excludes=excludes): + ret[m].update(mediations) + + for mediator in sorted(ret): + for med_priority, med_ver, med_impl in sorted( + ret[mediator], key=cmp_to_key(med.cmp_mediations) + ): + val = {} + if med_ver: + # Don't expose internal Version object + # to callers. + val["version"] = med_ver.get_short_version() + if med_impl: + val["implementation"] = med_impl + + ret_priority = med_priority + if not ret_priority: + # For consistency with the configured + # case, list source as this. + ret_priority = "system" + # Always set both to be consistent + # with @mediators. + val["version-source"] = ret_priority + val["implementation-source"] = ret_priority + yield mediator, val + + def get_avoid_list(self): + """Return list of tuples of (pkg stem, pkgs w/ group + dependencies on this)""" + return [a for a in six.iteritems(self._img.get_avoid_dict())] + + def gen_facets(self, facet_list, implicit=False, patterns=misc.EmptyI): + """A generator function that produces tuples of the form: + + ( + name, - (string) facet name (e.g. facet.doc) + value - (boolean) current facet value + src - (string) source for the value + masked - (boolean) is the facet maksed by another + ) + + Results are always sorted by facet name. + + 'facet_list' is one of the following constant values indicating + which facets should be returned based on how they were set: + + FACET_ALL + Return all facets set in the image and all + facets listed in installed packages. + + FACET_IMAGE + Return only the facets set in the image. + + FACET_INSTALLED + Return only the facets listed in installed + packages. + + 'implicit' is a boolean indicating whether facets specified in + the 'patterns' parameter that are not explicitly set in the + image or found in a package should be included. Ignored for + FACET_INSTALLED case. + + 'patterns' is an optional list of facet wildcard strings to + filter results by.""" + + facets = self._img.cfg.facets + if facet_list != self.FACET_INSTALLED: + # Include all facets set in image. + fimg = set(facets.keys()) + else: + # Don't include any set only in image. + fimg = set() + + # Get all facets found in packages and determine state. + fpkg = set() + excludes = self._img.list_excludes() + if facet_list != self.FACET_IMAGE: + for f in self._img.gen_installed_pkgs(): + # The manifest must be loaded without + # pre-applying excludes so that gen_facets() can + # choose how to filter the actions. + mfst = self._img.get_manifest(f, ignore_excludes=True) + for facet in mfst.gen_facets(excludes=excludes): + # Use Facets object to determine + # effective facet state. 
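The shape of the data gen_available_mediators() builds up can be shown with toy input. This sketch collects per-mediator mediations into a defaultdict(set) and emits the same kind of dictionaries, but with invented manifest data and a deliberately simplified sort (the real code orders sources with med.cmp_mediations):

import collections

# (priority, version, implementation) tuples, as gathered from manifests.
manifests = [
    {"python": {(None, "3.11", None), ("vendor", "3.12", None)}},
    {"python": {(None, "3.11", None)}},
]

ret = collections.defaultdict(set)
for mfst in manifests:
    for mediator, mediations in mfst.items():
        ret[mediator].update(mediations)

for mediator in sorted(ret):
    for priority, version, impl in sorted(
        ret[mediator], key=lambda m: (m[0] or "system", m[1])
    ):
        val = {
            "version": version,
            "version-source": priority or "system",
            "implementation-source": priority or "system",
        }
        if impl:
            val["implementation"] = impl
        print(mediator, val)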
+ fpkg.add(facet) + + # If caller wants implicit values, include non-glob patterns + # (even if not found) in results unless only installed facets + # were requested. + iset = set() + if implicit and facet_list != self.FACET_INSTALLED: + iset = set( + p.startswith("facet.") and p or ("facet." + p) + for p in patterns + if "*" not in p and "?" not in p + ) + flist = sorted(fimg | fpkg | iset) + + # Generate the results. + for name in misc.yield_matching("facet.", flist, patterns): + # check if the facet is explicitly set. + if name not in facets: + # The image's Facets dictionary will return + # the effective value for any facets not + # explicitly set in the image (wildcards or + # implicit). _match_src() will tell us how + # that effective value was determined (via a + # local or inherited wildcard facet, or via a + # system default). + src = facets._match_src(name) + yield (name, facets[name], src, False) + continue + + # This is an explicitly set facet. + for value, src, masked in facets._src_values(name): + yield (name, value, src, masked) + + def gen_variants(self, variant_list, implicit=False, patterns=misc.EmptyI): + """A generator function that produces tuples of the form: + + ( + name, - (string) variant name (e.g. variant.arch) + value - (string) current variant value, + possible - (list) list of possible variant values based + on installed packages; empty unless using + *_POSSIBLE variant_list. + ) + + Results are always sorted by variant name. + + 'variant_list' is one of the following constant values indicating + which variants should be returned based on how they were set: + + VARIANT_ALL + Return all variants set in the image and all + variants listed in installed packages. + + VARIANT_ALL_POSSIBLE + Return possible variant values (those found in + any installed package) for all variants set in + the image and all variants listed in installed + packages. + + VARIANT_IMAGE + Return only the variants set in the image. + + VARIANT_IMAGE_POSSIBLE + Return possible variant values (those found in + any installed package) for only the variants set + in the image. + + VARIANT_INSTALLED + Return only the variants listed in installed + packages. + + VARIANT_INSTALLED_POSSIBLE + Return possible variant values (those found in + any installed package) for only the variants + listed in installed packages. + + 'implicit' is a boolean indicating whether variants specified in + the 'patterns' parameter that are not explicitly set in the + image or found in a package should be included. Ignored for + VARIANT_INSTALLED* cases. + + 'patterns' is an optional list of variant wildcard strings to + filter results by.""" + + variants = self._img.cfg.variants + if ( + variant_list != self.VARIANT_INSTALLED + and variant_list != self.VARIANT_INSTALLED_POSSIBLE + ): + # Include all variants set in image. + vimg = set(variants.keys()) + else: + # Don't include any set only in image. + vimg = set() + + # Get all variants found in packages and determine state. + vpkg = {} + excludes = self._img.list_excludes() + vposs = collections.defaultdict(set) + if variant_list != self.VARIANT_IMAGE: + # Only incur the overhead of reading through all + # installed packages if not just listing variants set in + # image or listing possible values for them. + for f in self._img.gen_installed_pkgs(): + # The manifest must be loaded without + # pre-applying excludes so that gen_variants() + # can choose how to filter the actions. 
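The facet-name merge in gen_facets() boils down to the union of three sets: facets set on the image, facets named by installed packages, and any non-glob patterns the caller asked about explicitly. A small self-contained sketch with invented values:

fimg = {"facet.doc.man", "facet.devel"}          # set in the image
fpkg = {"facet.doc.man", "facet.locale.de"}      # named by installed packages
patterns = ["doc.*", "optional.gui"]

# Equivalent of the "p.startswith(...) and p or ..." idiom above.
iset = {
    p if p.startswith("facet.") else "facet." + p
    for p in patterns
    if "*" not in p and "?" not in p
}

for name in sorted(fimg | fpkg | iset):
    print(name)
# facet.devel, facet.doc.man, facet.locale.de, facet.optional.gui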
+ mfst = self._img.get_manifest(f, ignore_excludes=True) + for variant, vals in mfst.gen_variants(excludes=excludes): + if variant not in vimg: + # Although rare, packages with + # unknown variants (those not + # set in the image) can be + # installed as long as content + # does not conflict. For those + # variants, return None. This + # is done without using get() as + # that would cause None to be + # returned for implicitly set + # variants (e.g. debug). try: - be_template = time.strftime( - be_template[len(AUTO_BE_NAME_TIME_PREFIX):]) - except: - return - else: - release = date = None - # Check to see if release/name is being updated - for src, dest in self._img.imageplan.plan_desc: - if (not dest or - dest.get_name() != 'release/name'): - continue - # It is, extract attributes - for a in self._img.imageplan.pd.update_actions: - if not isinstance(a.dst, - actions.attribute.AttributeAction): - continue - name = a.dst.attrs['name'] - if name == 'ooce.release': - release = a.dst.attrs['value'] - elif name == 'ooce.release.build': - date = a.dst.attrs['value'] - if release and date: - break - break - - if not release and not date: - # No variables changed in this update - return - - if '%r' in be_template and not release: - return - if '%d' in be_template and not date: - return - if '%D' in be_template and not date: - return - if release: - be_template = be_template.replace('%r', release) - if date: - be_template = be_template.replace('%d', date) - be_template = be_template.replace('%D', - date.replace('.', '')) - - if not be_template or len(be_template) == 0: - return - - be = bootenv.BootEnv(self._img) - self.__be_name = be.get_new_be_name(new_bename=be_template) - - def __set_be_creation(self): - """Figure out whether or not we'd create a new or backup boot - environment given inputs and plan. Toss cookies if we need a - new be and can't have one.""" - - if not self._img.is_liveroot(): - self.__backup_be = False - self.__new_be = False - return - - if self.__new_be is None: - # If image policy requires a new BE or the plan requires - # it, then create a new BE. - self.__new_be = (self._img.cfg.get_policy_str( - imgcfg.BE_POLICY) == "always-new" or - self._img.imageplan.reboot_needed()) - elif self.__new_be is False and \ - self._img.imageplan.reboot_needed(): - raise apx.ImageUpdateOnLiveImageException() - - # If a new BE is required and no BE name has been provided - # on the command line, attempt to determine a BE name - # automatically. - if self.__new_be == True and self.__be_name == None: - self.__auto_be_name() - - if not self.__new_be and self.__backup_be is None: - # Create a backup be if allowed by policy (note that the - # 'default' policy is currently an alias for - # 'create-backup') ... - allow_backup = self._img.cfg.get_policy_str( - imgcfg.BE_POLICY) in ("default", - "create-backup") - - self.__backup_be = False - if allow_backup: - # ...when packages are being - # updated... - for src, dest in self._img.imageplan.plan_desc: - if src and dest: - self.__backup_be = True - break - if allow_backup and not self.__backup_be: - # ...or if new packages that have - # reboot-needed=true are being - # installed. - self.__backup_be = \ - self._img.imageplan.reboot_advised() - - def abort(self, result=RESULT_FAILED_UNKNOWN): - """Indicate that execution was unexpectedly aborted and log - operation failure if possible.""" - try: - # This can raise if, for example, we're aborting - # because we have a PipeError and we can no longer - # write. So suppress problems here. 
- if self.__progresstracker: - self.__progresstracker.flush() - except: - pass - - self._img.history.abort(result) - - def avoid_pkgs(self, fmri_strings, unavoid=False): - """Avoid/Unavoid one or more packages. It is an error to - avoid an installed package, or unavoid one that would - be installed.""" + vpkg[variant] = variants[variant] + except KeyError: + vpkg[variant] = None + + if ( + variant_list == self.VARIANT_ALL_POSSIBLE + or variant_list == self.VARIANT_IMAGE_POSSIBLE + or variant_list == self.VARIANT_INSTALLED_POSSIBLE + ): + # Build possible list of variant + # values. + vposs[variant].update(set(vals)) + + # If caller wants implicit values, include non-glob debug + # patterns (even if not found) in results unless only installed + # variants were requested. + iset = set() + if ( + implicit + and variant_list != self.VARIANT_INSTALLED + and variant_list != self.VARIANT_INSTALLED_POSSIBLE + ): + # Normalize patterns. + iset = set( + p.startswith("variant.") and p or ("variant." + p) + for p in patterns + if "*" not in p and "?" not in p + ) + # Only debug variants can have an implicit value. + iset = set(p for p in iset if p.startswith("variant.debug.")) + vlist = sorted(vimg | set(vpkg.keys()) | iset) + + # Generate the results. + for name in misc.yield_matching("variant.", vlist, patterns): + try: + yield (name, vpkg[name], sorted(vposs[name])) + except KeyError: + yield (name, variants[name], sorted(vposs[name])) + + def freeze_pkgs( + self, fmri_strings, dry_run=False, comment=None, unfreeze=False + ): + """Freeze/Unfreeze one or more packages.""" + + # Comment is only a valid parameter if a freeze is happening. + assert not comment or not unfreeze + + self._acquire_activity_lock() + try: + if unfreeze: + return self._img.unfreeze_pkgs( + fmri_strings, + progtrack=self.__progresstracker, + check_cancel=self.__check_cancel, + dry_run=dry_run, + ) + else: + return self._img.freeze_pkgs( + fmri_strings, + progtrack=self.__progresstracker, + check_cancel=self.__check_cancel, + dry_run=dry_run, + comment=comment, + ) + finally: + self._activity_lock.release() - self._acquire_activity_lock() - try: - if not unavoid: - self._img.avoid_pkgs(fmri_strings, - progtrack=self.__progresstracker, - check_cancel=self.__check_cancel) - else: - self._img.unavoid_pkgs(fmri_strings, - progtrack=self.__progresstracker, - check_cancel=self.__check_cancel) - finally: - self._activity_lock.release() - return True + def get_frozen_list(self): + """Return list of tuples of (pkg fmri, reason package was + frozen, timestamp when package was frozen).""" - def flag_pkgs(self, fmri_strings, flag, value): - if flag == 'manual': - state = PackageInfo.MANUAL - else: - raise apx.InvalidOptionErrors('Unknown flag') + return self._img.get_frozen_list() - pfmris = [] - for pfmri, _, _, _, _ in self.get_pkg_list( - pkg_list=self.LIST_INSTALLED, patterns=fmri_strings, - raise_unmatched=True, return_fmris=True): - pfmris.append(pfmri) + def cleanup_cached_content(self, verbose=False): + """Clean up any cached content.""" - self._acquire_activity_lock() + self._acquire_activity_lock() + try: + return self._img.cleanup_cached_content( + progtrack=self.__progresstracker, force=True, verbose=verbose + ) + finally: + self._activity_lock.release() + + def __plan_common_exception(self, log_op_end_all=False): + """Deal with exceptions that can occur while planning an + operation. Any exceptions generated here are passed + onto the calling context. 
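gen_variants() builds two structures while walking installed manifests: vpkg maps each variant to its current image value (or None if the image never set it), and vposs collects every value any package declares. The same bookkeeping with toy data:

import collections

manifests = [
    {"variant.arch": ["i386", "sparc"], "variant.debug.osnet": ["true", "false"]},
    {"variant.arch": ["i386"]},
]
image_variants = {"variant.arch": "i386"}        # set in the image

vpkg = {}
vposs = collections.defaultdict(set)
for mfst in manifests:
    for variant, vals in mfst.items():
        # None marks a variant a package declares but the image never set.
        vpkg.setdefault(variant, image_variants.get(variant))
        vposs[variant].update(vals)

for name in sorted(vpkg):
    print(name, vpkg[name], sorted(vposs[name]))
# variant.arch i386 ['i386', 'sparc']
# variant.debug.osnet None ['false', 'true']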
By default all exceptions + will result in a call to self.log_operation_end() before + they are passed onto the calling context.""" + + exc_type, exc_value, exc_traceback = sys.exc_info() + + if exc_type == apx.PlanCreationException: + self.__set_history_PlanCreationException(exc_value) + elif exc_type == apx.CanceledException: + self._cancel_done() + elif exc_type == apx.ConflictingActionErrors: + self.log_operation_end( + error=str(exc_value), result=RESULT_CONFLICTING_ACTIONS + ) + elif exc_type in [apx.IpkgOutOfDateException, fmri.IllegalFmri]: + self.log_operation_end(error=exc_value) + elif log_op_end_all: + self.log_operation_end(error=exc_value) + + if exc_type not in (apx.ImageLockedError, apx.ImageLockingFailedError): + # Must be called before reset_unlock, and only if + # the exception was not a locked error. + self._img.unlock() - try: - self._img.flag_pkgs(pfmris=pfmris, - state=state, value=value, - progtrack=self.__progresstracker) - finally: - self._activity_lock.release() + try: + if int(os.environ.get("PKG_DUMP_STATS", 0)) > 0: + self._img.transport.stats.dump() + except ValueError: + # Don't generate stats if an invalid value + # is supplied. + pass + + # In the case of duplicate actions, we want to save off the plan + # description for display to the client (if they requested it), + # as once the solver's done its job, there's interesting + # information in the plan. We have to save it here and restore + # it later because __reset_unlock() torches it. + if exc_type == apx.ConflictingActionErrors: + self._img.imageplan.set_be_options( + self.__backup_be, + self.__backup_be_name, + self.__new_be, + self.__be_activate, + self.__be_name, + ) + plan_desc = self._img.imageplan.describe() + + self.__reset_unlock() + + if exc_type == apx.ConflictingActionErrors: + self.__plan_desc = plan_desc + + self._activity_lock.release() + + # re-raise the original exception. (we have to explicitly + # restate the original exception since we may have cleared the + # current exception scope above.) + six.reraise(exc_type, exc_value, exc_traceback) + + def solaris_image(self): + """Returns True if the current image is a solaris image, or an + image which contains the pkg(7) packaging system.""" + + # First check to see if the special package "release/name" + # exists and contains metadata saying this is Solaris. + results = self.__get_pkg_list( + self.LIST_INSTALLED, patterns=["release/name"], return_fmris=True + ) + results = [e for e in results] + if results: + pfmri, summary, categories, states, attrs = results[0] + mfst = self._img.get_manifest(pfmri) + osname = mfst.get("pkg.release.osname", None) + if osname == "sunos": return True - def gen_available_mediators(self): - """A generator function that yields tuples of the form - (mediator, mediations), where mediator is the name of the - provided mediation and mediations is a list of dictionaries - of possible mediations to set, provided by installed - packages, of the form: - - { - mediator-name: { - "version": mediator-version-string, - "version-source": (site|vendor|system|local), - "implementation": mediator-implementation-string, - "implementation-source": (site|vendor|system|local), - } - } - - 'version' is an optional string that specifies the version - (expressed as a dot-separated sequence of non-negative - integers) of the mediator for use. - - 'version-source' is a string describing how the version - component of the mediation will be evaluated during - mediation. (The priority.) 
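__plan_common_exception() keeps the original exception alive across a fair amount of cleanup by capturing sys.exc_info() and re-raising it explicitly with six.reraise() (six is already used elsewhere in this module). The idea in isolation:

import sys
import six

def cleanup_then_reraise(fn):
    try:
        fn()
    except:
        exc_type, exc_value, exc_tb = sys.exc_info()
        # ... unlock/reset work goes here; it can clear the current
        # exception scope, hence the explicit re-raise below ...
        six.reraise(exc_type, exc_value, exc_tb)

try:
    cleanup_then_reraise(lambda: 1 / 0)
except ZeroDivisionError:
    print("original exception type preserved")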
- - 'implementation' is an optional string that specifies the - implementation of the mediator for use in addition to or - instead of 'version'. - - 'implementation-source' is a string describing how the - implementation component of the mediation will be evaluated - during mediation. (The priority.) - - The list of possible mediations returned for each mediator is - ordered by source in the sequence 'site', 'vendor', 'system', - and then by version and implementation. It does not include - mediations that exist only in the image configuration. - """ - - ret = collections.defaultdict(set) - excludes = self._img.list_excludes() - for f in self._img.gen_installed_pkgs(): - mfst = self._img.get_manifest(f) - for m, mediations in mfst.gen_mediators( - excludes=excludes): - ret[m].update(mediations) - - for mediator in sorted(ret): - for med_priority, med_ver, med_impl in sorted( - ret[mediator], key=cmp_to_key(med.cmp_mediations)): - val = {} - if med_ver: - # Don't expose internal Version object - # to callers. - val["version"] = \ - med_ver.get_short_version() - if med_impl: - val["implementation"] = med_impl - - ret_priority = med_priority - if not ret_priority: - # For consistency with the configured - # case, list source as this. - ret_priority = "system" - # Always set both to be consistent - # with @mediators. - val["version-source"] = ret_priority - val["implementation-source"] = \ - ret_priority - yield mediator, val - - def get_avoid_list(self): - """Return list of tuples of (pkg stem, pkgs w/ group - dependencies on this) """ - return [a for a in six.iteritems(self._img.get_avoid_dict())] - - def gen_facets(self, facet_list, implicit=False, patterns=misc.EmptyI): - """A generator function that produces tuples of the form: + # Otherwise, see if we can find package/pkg (or SUNWipkg) and + # system/core-os (or SUNWcs). + results = self.__get_pkg_list( + self.LIST_INSTALLED, + patterns=["/package/pkg", "SUNWipkg", "/system/core-os", "SUNWcs"], + ) + installed = set(e[0][1] for e in results) + if ("SUNWcs" in installed or "system/core-os" in installed) and ( + "SUNWipkg" in installed or "package/pkg" in installed + ): + return True - ( - name, - (string) facet name (e.g. facet.doc) - value - (boolean) current facet value - src - (string) source for the value - masked - (boolean) is the facet maksed by another - ) - - Results are always sorted by facet name. - - 'facet_list' is one of the following constant values indicating - which facets should be returned based on how they were set: + return False - FACET_ALL - Return all facets set in the image and all - facets listed in installed packages. + def __ipkg_require_latest(self, noexecute): + """Raises an IpkgOutOfDateException if the current image + contains the pkg(7) packaging system and a newer version + of the pkg(7) packaging system is installable.""" - FACET_IMAGE - Return only the facets set in the image. + if not self.solaris_image(): + return - FACET_INSTALLED - Return only the facets listed in installed - packages. + # Get old purpose in order to be able to restore it on return. + p = self.__progresstracker.get_purpose() - 'implicit' is a boolean indicating whether facets specified in - the 'patterns' parameter that are not explicitly set in the - image or found in a package should be included. Ignored for - FACET_INSTALLED case. - - 'patterns' is an optional list of facet wildcard strings to - filter results by.""" - - facets = self._img.cfg.facets - if facet_list != self.FACET_INSTALLED: - # Include all facets set in image. 
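The fallback test at the end of solaris_image() is a pair of set-membership checks: the image counts as a pkg(7) image if it has a core OS package and the packaging system itself, under either the old SUNW names or the current ones. With an invented installed set:

installed = {"system/core-os", "package/pkg", "web/server/nginx"}

is_pkg_image = (
    ("SUNWcs" in installed or "system/core-os" in installed)
    and ("SUNWipkg" in installed or "package/pkg" in installed)
)
print(is_pkg_image)    # True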
- fimg = set(facets.keys()) + try: + # + # Let progress tracker know that subsequent callbacks + # into it will all be in service of update checking. + # Note that even though this might return, the + # finally: will still reset the purpose. + # + self.__progresstracker.set_purpose( + self.__progresstracker.PURPOSE_PKG_UPDATE_CHK + ) + if self._img.ipkg_is_up_to_date( + self.__check_cancel, + noexecute, + refresh_allowed=False, + progtrack=self.__progresstracker, + ): + return + except apx.ImageNotFoundException: + # Can't do anything in this + # case; so proceed. + return + finally: + self.__progresstracker.set_purpose(p) + + raise apx.IpkgOutOfDateException() + + def __verify_args(self, args): + """Verifies arguments passed into the API. + It tests for correct data types of the input args, verifies that + passed in FMRIs are valid, checks if repository URIs are valid + and does some logical tests for the combination of arguments.""" + + arg_types = { + # arg name type nullable + "_act_timeout": (int, False), + "_be_activate": ("activate", False), + "_be_name": (six.string_types, True), + "_backup_be": (bool, True), + "_backup_be_name": (six.string_types, True), + "_ignore_missing": (bool, False), + "_ipkg_require_latest": (bool, False), + "_li_erecurse": (iter, True), + "_li_ignore": (iter, True), + "_li_md_only": (bool, False), + "_li_parent_sync": (bool, False), + "_new_be": (bool, True), + "_noexecute": (bool, False), + "_pubcheck": (bool, False), + "_refresh_catalogs": (bool, False), + "_repos": (iter, True), + "_update_index": (bool, False), + "facets": (dict, True), + "mediators": (iter, True), + "pkgs_inst": (iter, True), + "pkgs_to_uninstall": (iter, True), + "pkgs_update": (iter, True), + "reject_list": (iter, True), + "variants": (dict, True), + } + + # merge kwargs into the main arg dict + if "kwargs" in args: + for name, value in args["kwargs"].items(): + args[name] = value + + # check arguments for proper type and nullability + for a in args: + try: + a_type, nullable = arg_types[a] + except KeyError: + # unknown argument passed, ignore + continue + + assert nullable or args[a] is not None + + if args[a] is not None and a_type == iter: + try: + iter(args[a]) + except TypeError: + raise AssertionError("{0} is not an " "iterable".format(a)) + elif a_type == "activate": + assert isinstance(args[a], bool) or ( + isinstance(args[a], str) and args[a] == "bootnext" + ) + else: + assert args[a] is None or isinstance( + args[a], a_type + ), "{0} is " "type {1}; expected {2}".format(a, type(a), a_type) + + # check if passed FMRIs are valid + illegals = [] + for i in ( + "pkgs_inst", + "pkgs_update", + "pkgs_to_uninstall", + "reject_list", + ): + try: + fmris = args[i] + except KeyError: + continue + if fmris is None: + continue + for pat, err, pfmri, matcher in self.parse_fmri_patterns(fmris): + if not err: + continue else: - # Don't include any set only in image. - fimg = set() - - # Get all facets found in packages and determine state. - fpkg = set() - excludes = self._img.list_excludes() - if facet_list != self.FACET_IMAGE: - for f in self._img.gen_installed_pkgs(): - # The manifest must be loaded without - # pre-applying excludes so that gen_facets() can - # choose how to filter the actions. - mfst = self._img.get_manifest(f, - ignore_excludes=True) - for facet in mfst.gen_facets(excludes=excludes): - # Use Facets object to determine - # effective facet state. 
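The core of __verify_args() is a table of (expected type, nullable) pairs that drives generic checking, with iter used as a sentinel meaning "any iterable". A trimmed, self-contained version of the same approach:

arg_types = {
    "_noexecute": (bool, False),
    "_be_name": (str, True),
    "_repos": (iter, True),
}

def verify_args(args):
    for name, value in args.items():
        try:
            a_type, nullable = arg_types[name]
        except KeyError:
            continue                      # unknown argument, ignore
        assert nullable or value is not None
        if value is None:
            continue
        if a_type is iter:
            iter(value)                   # TypeError if not iterable
        else:
            assert isinstance(value, a_type), f"{name} is {type(value)}"

verify_args({"_noexecute": False, "_be_name": None,
             "_repos": ["https://pkg.example.org"]})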
- fpkg.add(facet) - - # If caller wants implicit values, include non-glob patterns - # (even if not found) in results unless only installed facets - # were requested. - iset = set() - if implicit and facet_list != self.FACET_INSTALLED: - iset = set( - p.startswith("facet.") and p or ("facet." + p) - for p in patterns - if "*" not in p and "?" not in p - ) - flist = sorted(fimg | fpkg | iset) - - # Generate the results. - for name in misc.yield_matching("facet.", flist, patterns): - # check if the facet is explicitly set. - if name not in facets: - # The image's Facets dictionary will return - # the effective value for any facets not - # explicitly set in the image (wildcards or - # implicit). _match_src() will tell us how - # that effective value was determined (via a - # local or inherited wildcard facet, or via a - # system default). - src = facets._match_src(name) - yield (name, facets[name], src, False) - continue - - # This is an explicitly set facet. - for value, src, masked in facets._src_values(name): - yield (name, value, src, masked) + illegals.append(fmris) + + if illegals: + raise apx.PlanCreationException(illegal=illegals) + + # some logical checks + errors = [] + if not args["_new_be"] and args["_be_name"]: + errors.append( + apx.InvalidOptionError( + apx.InvalidOptionError.REQUIRED, ["_be_name", "_new_be"] + ) + ) + if not args["_backup_be"] and args["_backup_be_name"]: + errors.append( + apx.InvalidOptionError( + apx.InvalidOptionError.REQUIRED, + ["_backup_be_name", "_backup_be"], + ) + ) + if args["_backup_be"] and args["_new_be"]: + errors.append( + apx.InvalidOptionError( + apx.InvalidOptionError.INCOMPAT, ["_backup_be", "_new_be"] + ) + ) + + if errors: + raise apx.InvalidOptionErrors(errors) + + # check if repo URIs are valid + try: + repos = args["_repos"] + except KeyError: + return + + if not repos: + return + + illegals = [] + for r in repos: + valid = False + if type(r) == publisher.RepositoryURI: + # RepoURI objects pass right away + continue + + if not misc.valid_pub_url(r): + illegals.append(r) + + if illegals: + raise apx.UnsupportedRepositoryURI(illegals) + + def __plan_op( + self, + _op, + _act_timeout=0, + _ad_kwargs=None, + _backup_be=None, + _backup_be_name=None, + _be_activate=True, + _be_name=None, + _ipkg_require_latest=False, + _li_ignore=None, + _li_erecurse=None, + _li_md_only=False, + _li_parent_sync=True, + _new_be=False, + _noexecute=False, + _pubcheck=True, + _refresh_catalogs=True, + _repos=None, + _update_index=True, + **kwargs, + ): + """Contructs a plan to change the package or linked image + state of an image. + + We can raise PermissionsException, PlanCreationException, + InventoryException, or LinkedImageException. + + Arguments prefixed with '_' are primarily used within this + function. All other arguments must be specified via keyword + assignment and will be passed directly on to the image + interfaces being invoked." + + '_op' is the API operation we will perform. + + '_ad_kwargs' is only used dyring attach or detach and it + is a dictionary of arguments that will be passed to the + linked image attach/detach interfaces. + + '_ipkg_require_latest' enables a check to verify that the + latest installable version of the pkg(7) packaging system is + installed before we proceed with the requested operation. + + For all other '_' prefixed parameters, please refer to the + 'gen_plan_*' functions which invoke this function for an + explanation of their usage and effects. 
+ + This function first yields the plan description for the global + zone, then either a series of dictionaries representing the + parsable output from operating on the child images or a series + of None values.""" + + # sanity checks + assert _op in api_op_values + assert _ad_kwargs is None or _op in [API_OP_ATTACH, API_OP_DETACH] + assert _ad_kwargs != None or _op not in [API_OP_ATTACH, API_OP_DETACH] + assert not _li_md_only or _op in [ + API_OP_ATTACH, + API_OP_DETACH, + API_OP_SYNC, + ] + assert not _li_md_only or _li_parent_sync + + self.__verify_args(locals()) + + # make some perf optimizations + if _li_md_only: + _refresh_catalogs = _update_index = False + if _op in [ + API_OP_DETACH, + API_OP_SET_MEDIATOR, + API_OP_FIX, + API_OP_VERIFY, + API_OP_DEHYDRATE, + API_OP_REHYDRATE, + ]: + # these operations don't change fmris and don't need + # to recurse, so disable a bunch of linked image + # operations. + _li_parent_sync = False + _pubcheck = False + _li_ignore = [] # ignore all children + + # All the image interface functions that we invoke have some + # common arguments. Set those up now. + args_common = {} + args_common["op"] = _op + args_common["progtrack"] = self.__progresstracker + args_common["check_cancel"] = self.__check_cancel + args_common["noexecute"] = _noexecute + + # make sure there is no overlap between the common arguments + # supplied to all api interfaces and the arguments that the + # api arguments that caller passed to this function. + assert ( + set(args_common) & set(kwargs) + ) == set(), "{0} & {1} != set()".format( + str(set(args_common)), str(set(kwargs)) + ) + kwargs.update(args_common) - def gen_variants(self, variant_list, implicit=False, - patterns=misc.EmptyI): - """A generator function that produces tuples of the form: + try: + # Lock the current image. + self.__plan_common_start( + _op, + _noexecute, + _backup_be, + _backup_be_name, + _new_be, + _be_name, + _be_activate, + ) - ( - name, - (string) variant name (e.g. variant.arch) - value - (string) current variant value, - possible - (list) list of possible variant values based - on installed packages; empty unless using - *_POSSIBLE variant_list. - ) + except: + raise - Results are always sorted by variant name. + try: + if _op == API_OP_ATTACH: + self._img.linked.attach_parent(**_ad_kwargs) + elif _op == API_OP_DETACH: + self._img.linked.detach_parent(**_ad_kwargs) - 'variant_list' is one of the following constant values indicating - which variants should be returned based on how they were set: + if _li_parent_sync: + # refresh linked image data from parent image. + self._img.linked.syncmd_from_parent() - VARIANT_ALL - Return all variants set in the image and all - variants listed in installed packages. + # initialize recursion state + self._img.linked.api_recurse_init( + li_ignore=_li_ignore, repos=_repos + ) - VARIANT_ALL_POSSIBLE - Return possible variant values (those found in - any installed package) for all variants set in - the image and all variants listed in installed - packages. + if _pubcheck: + # check that linked image pubs are in sync + self.__linked_pubcheck(_op) + + if _refresh_catalogs: + self.__refresh_publishers() + + if _ipkg_require_latest: + # If this is an image update then make + # sure the latest version of the ipkg + # software is installed. 
+ self.__ipkg_require_latest(_noexecute) + + self.__set_img_alt_sources(_repos) + + if _li_md_only: + self._img.make_noop_plan(**args_common) + elif _op in [API_OP_ATTACH, API_OP_DETACH, API_OP_SYNC]: + self._img.make_sync_plan(**kwargs) + elif _op in [API_OP_CHANGE_FACET, API_OP_CHANGE_VARIANT]: + self._img.make_change_varcets_plan(**kwargs) + elif _op == API_OP_DEHYDRATE: + self._img.make_dehydrate_plan(**kwargs) + elif _op == API_OP_INSTALL or _op == API_OP_EXACT_INSTALL: + self._img.make_install_plan(**kwargs) + elif _op in [API_OP_FIX, API_OP_VERIFY]: + self._img.make_fix_plan(**kwargs) + elif _op == API_OP_REHYDRATE: + self._img.make_rehydrate_plan(**kwargs) + elif _op == API_OP_REVERT: + self._img.make_revert_plan(**kwargs) + elif _op == API_OP_SET_MEDIATOR: + self._img.make_set_mediators_plan(**kwargs) + elif _op == API_OP_UNINSTALL: + self._img.make_uninstall_plan(**kwargs) + elif _op == API_OP_UPDATE: + self._img.make_update_plan(**kwargs) + else: + raise RuntimeError("Unknown api op: {0}".format(_op)) + + self.__api_op = _op + + if self._img.imageplan.nothingtodo(): + # no package changes mean no index changes + _update_index = False + + self._disable_cancel() + self.__set_be_creation() + self._img.imageplan.set_be_options( + self.__backup_be, + self.__backup_be_name, + self.__new_be, + self.__be_activate, + self.__be_name, + ) + self.__plan_desc = self._img.imageplan.describe() + if not _noexecute: + self.__plan_type = self.__plan_desc.plan_type + + if _act_timeout != 0: + self.__plan_desc.set_actuator_timeout(_act_timeout) + + # Yield to our caller so they can display our plan + # before we recurse into child images. Drop the + # activity lock before yielding because otherwise the + # caller can't do things like set the displayed + # license state for pkg plans). + self._activity_lock.release() + yield self.__plan_desc + self._activity_lock.acquire() + + # plan operation in child images. This currently yields + # either a dictionary representing the parsable output + # from the child image operation, or None. Eventually + # these will yield plan descriptions objects instead. + + for p_dict in self._img.linked.api_recurse_plan( + api_kwargs=kwargs, + erecurse_list=_li_erecurse, + refresh_catalogs=_refresh_catalogs, + update_index=_update_index, + progtrack=self.__progresstracker, + ): + yield p_dict + + self.__planned_children = True - VARIANT_IMAGE - Return only the variants set in the image. + except: + if _op in [ + API_OP_UPDATE, + API_OP_INSTALL, + API_OP_REVERT, + API_OP_SYNC, + ]: + self.__plan_common_exception(log_op_end_all=True) + else: + self.__plan_common_exception() + # NOTREACHED + + stuff_to_do = not self.planned_nothingtodo() + + if not stuff_to_do or _noexecute: + self.log_operation_end(result=RESULT_NOTHING_TO_DO) + + self._img.imageplan.update_index = _update_index + self.__plan_common_finish() + + # Value 'DebugValues' is unsubscriptable; + # pylint: disable=E1136 + if DebugValues["plandesc_validate"]: + # save, load, and get a new json copy of the plan, + # then compare that new copy against our current one. + # this regressions tests the plan save/load code. 
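One detail of __plan_op() worth calling out is that it releases the activity lock around the first yield so the caller can use other API calls while looking at the plan, then re-takes it before recursing into children. A minimal generator showing that drop-the-lock-around-yield shape (the lock and plan here are placeholders):

import threading

lock = threading.Lock()

def plan():
    lock.acquire()
    try:
        desc = {"op": "install"}        # planning work happens under the lock
        lock.release()                  # let the caller use the API meanwhile
        yield desc
        lock.acquire()                  # re-take it before recursing
        yield None                      # child-image results would follow
    finally:
        if lock.locked():
            lock.release()

for item in plan():
    print(item)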
+ pd_json1 = self.__plan_desc.getstate( + self.__plan_desc, reset_volatiles=True + ) + fobj = tempfile.TemporaryFile(mode="w+") + json.dump(pd_json1, fobj) + pd_new = plandesc.PlanDescription(_op) + pd_new._load(fobj) + pd_json2 = pd_new.getstate(pd_new, reset_volatiles=True) + fobj.close() + del fobj, pd_new + pkg.misc.json_diff( + "PlanDescription", pd_json1, pd_json2, pd_json1, pd_json2 + ) + del pd_json1, pd_json2 + + @_LockedCancelable() + def load_plan(self, plan, prepared=False): + """Load a previously generated PlanDescription.""" + + # Prevent loading a plan if one has been already. + if self.__plan_type is not None: + raise apx.PlanExistsException(self.__plan_type) + + # grab image lock. we don't worry about dropping the image + # lock since __activity_lock will drop it for us us after we + # return (or if we generate an exception). + self._img.lock() + + # load the plan + self.__plan_desc = plan + self.__plan_type = plan.plan_type + self.__planned_children = True + self.__prepared = prepared + + # load BE related plan settings + self.__new_be = plan.new_be + self.__be_activate = plan.activate_be + self.__be_name = plan.be_name + + # sanity check: verify the BE name + if self.__be_name is not None: + self.check_be_name(self.__be_name) + if not self._img.is_liveroot(): + raise apx.BENameGivenOnDeadBE(self.__be_name) + + # sanity check: verify that all the fmris in the plan are in + # the known catalog + pkg_cat = self._img.get_catalog(self._img.IMG_CATALOG_KNOWN) + for pp in plan.pkg_plans: + if pp.destination_fmri: + assert pkg_cat.get_entry( + pp.destination_fmri + ), "fmri part of plan, but currently " "unknown: {0}".format( + pp.destination_fmri + ) + + # allocate an image plan based on the supplied plan + self._img.imageplan = imageplan.ImagePlan( + self._img, + plan._op, + self.__progresstracker, + check_cancel=self.__check_cancel, + pd=plan, + ) + + if prepared: + self._img.imageplan.skip_preexecute() + + # create a history entry + self.log_operation_start(plan.plan_type) + + def __linked_pubcheck(self, api_op=None): + """Private interface to perform publisher check on this image + and its children.""" + + if api_op in [API_OP_DETACH, API_OP_SET_MEDIATOR]: + # we don't need to do a pubcheck for detach or + # changing mediators + return + + # check the current image + self._img.linked.pubcheck() + + # check child images + self._img.linked.api_recurse_pubcheck(self.__progresstracker) + + @_LockedCancelable() + def linked_publisher_check(self): + """If we're a child image, verify that the parent image's + publisher configuration is a subset of the child image's + publisher configuration. If we have any children, recurse + into them and perform a publisher check.""" + + # grab image lock. we don't worry about dropping the image + # lock since __activity_lock will drop it for us us after we + # return (or if we generate an exception). + self._img.lock(allow_unprivileged=True) + + # get ready to recurse + self._img.linked.api_recurse_init() + + # check that linked image pubs are in sync + self.__linked_pubcheck() + + @_LockedCancelable() + def hotfix_origin_cleanup(self): + # grab image lock. Cleanup is handled by the decorator. 
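The plandesc_validate check above is a save/load round trip through a temporary file. Reduced to plain dictionaries, the mechanics look like this (the real code diffs PlanDescription state rather than asserting equality):

import json
import tempfile

state = {"op": "install", "pkgs": ["editor/vim"]}

with tempfile.TemporaryFile(mode="w+") as fobj:
    json.dump(state, fobj)
    fobj.seek(0)
    reloaded = json.load(fobj)

assert reloaded == state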
+ self._img.lock(allow_unprivileged=False) + + # prepare for recursion + self._img.linked.api_recurse_init() + + # clean up the image + self._img.hotfix_origin_cleanup() + + # clean up children + self._img.linked.api_recurse_hfo_cleanup(self.__progresstracker) + + def planned_nothingtodo(self, li_ignore_all=False): + """Once an operation has been planned check if there is + something todo. + + Callers should pass all arguments by name assignment and + not by positional order. + + 'li_ignore_all' indicates if we should only report on work + todo in the parent image. (i.e., if an operation was planned + and that operation only involves changes to children, and + li_ignore_all is true, then we'll report that there's nothing + todo.""" + + if not self._img.imageplan: + # if theres no plan there nothing to do + return True + if not self._img.imageplan.nothingtodo(): + return False + if not self._img.linked.nothingtodo(): + return False + if not li_ignore_all: + assert self.__planned_children + if not self._img.linked.recurse_nothingtodo(): + return False + return True + + def plan_update( + self, + pkg_list, + refresh_catalogs=True, + reject_list=misc.EmptyI, + noexecute=False, + update_index=True, + be_name=None, + new_be=False, + repos=None, + be_activate=True, + ): + """DEPRECATED. use gen_plan_update().""" + for pd in self.gen_plan_update( + pkgs_update=pkg_list, + refresh_catalogs=refresh_catalogs, + reject_list=reject_list, + noexecute=noexecute, + update_index=update_index, + be_name=be_name, + new_be=new_be, + repos=repos, + be_activate=be_activate, + ): + continue + return not self.planned_nothingtodo() + + def plan_update_all( + self, + refresh_catalogs=True, + reject_list=misc.EmptyI, + noexecute=False, + force=False, + update_index=True, + be_name=None, + new_be=True, + repos=None, + be_activate=True, + ): + """DEPRECATED. use gen_plan_update().""" + for pd in self.gen_plan_update( + refresh_catalogs=refresh_catalogs, + reject_list=reject_list, + noexecute=noexecute, + force=force, + update_index=update_index, + be_name=be_name, + new_be=new_be, + repos=repos, + be_activate=be_activate, + ): + continue + return (not self.planned_nothingtodo(), self.solaris_image()) + + def gen_plan_update( + self, + pkgs_update=None, + act_timeout=0, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + force=False, + ignore_missing=False, + li_ignore=None, + li_parent_sync=True, + li_erecurse=None, + new_be=True, + noexecute=False, + pubcheck=True, + refresh_catalogs=True, + reject_list=misc.EmptyI, + repos=None, + update_index=True, + ): + """This is a generator function that yields a PlanDescription + object. If parsable_version is set, it also yields dictionaries + containing plan information for child images. + + If pkgs_update is not set, constructs a plan to update all + packages on the system to the latest known versions. Once an + operation has been planned, it may be executed by first + calling prepare(), and then execute_plan(). After execution + of a plan, or to abandon a plan, reset() should be called. + + Callers should pass all arguments by name assignment and + not by positional order. + + If 'pkgs_update' is set, constructs a plan to update the + packages provided in pkgs_update. + + Once an operation has been planned, it may be executed by + first calling prepare(), and then execute_plan(). + + 'force' indicates whether update should skip the package + system up to date check. 
+ + 'ignore_missing' indicates whether update should ignore packages + which are not installed. + + 'pubcheck' indicates that we should skip the child image + publisher check before creating a plan for this image. only + pkg.1 should use this parameter, other callers should never + specify it. + + For all other parameters, refer to the 'gen_plan_install' + function for an explanation of their usage and effects.""" + + if pkgs_update or force: + ipkg_require_latest = False + else: + ipkg_require_latest = True + + op = API_OP_UPDATE + return self.__plan_op( + op, + _act_timeout=act_timeout, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_activate=be_activate, + _be_name=be_name, + _ipkg_require_latest=ipkg_require_latest, + _li_ignore=li_ignore, + _li_parent_sync=li_parent_sync, + _li_erecurse=li_erecurse, + _new_be=new_be, + _noexecute=noexecute, + _pubcheck=pubcheck, + _refresh_catalogs=refresh_catalogs, + _repos=repos, + _update_index=update_index, + ignore_missing=ignore_missing, + pkgs_update=pkgs_update, + reject_list=reject_list, + ) + + def plan_install( + self, + pkg_list, + refresh_catalogs=True, + noexecute=False, + update_index=True, + be_name=None, + reject_list=misc.EmptyI, + new_be=False, + repos=None, + be_activate=True, + ): + """DEPRECATED. use gen_plan_install().""" + for pd in self.gen_plan_install( + pkgs_inst=pkg_list, + refresh_catalogs=refresh_catalogs, + noexecute=noexecute, + update_index=update_index, + be_name=be_name, + reject_list=reject_list, + new_be=new_be, + repos=repos, + be_activate=be_activate, + ): + continue + return not self.planned_nothingtodo() + + def gen_plan_install( + self, + pkgs_inst, + act_timeout=0, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + li_erecurse=None, + li_ignore=None, + li_parent_sync=True, + new_be=False, + noexecute=False, + pubcheck=True, + refresh_catalogs=True, + reject_list=misc.EmptyI, + repos=None, + update_index=True, + ): + """This is a generator function that yields a PlanDescription + object. If parsable_version is set, it also yields dictionaries + containing plan information for child images. + + Constructs a plan to install the packages provided in + pkgs_inst. Once an operation has been planned, it may be + executed by first calling prepare(), and then execute_plan(). + After execution of a plan, or to abandon a plan, reset() + should be called. + + Callers should pass all arguments by name assignment and + not by positional order. + + 'act_timeout' sets the timeout for synchronous actuators in + seconds, -1 is no timeout, 0 is for using asynchronous + actuators. + + 'backup_be' indicates whether a backup boot environment should + be created before the operation is executed. If True, a backup + boot environment will be created. If False, a backup boot + environment will not be created. If None and a new boot + environment is not created, and packages are being updated or + are being installed and tagged with reboot-needed, a backup + boot environment will be created. + + 'backup_be_name' is a string to use as the name of any backup + boot environment created during the operation. + + 'be_activate' is an optional boolean indicating whether any + new boot environment created for the operation should be set + as the active one on next boot if the operation is successful. + + 'be_name' is a string to use as the name of any new boot + environment created during the operation. + + 'li_erecurse' is either None or a list. 
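Putting gen_plan_update() together with the lifecycle its docstring describes, a caller roughly does the following. Here api_inst stands for an already-constructed ImageInterface; how it is created is outside this hunk, so treat this as a hedged usage sketch rather than a complete program:

plans = api_inst.gen_plan_update(new_be=True, noexecute=False)
plan_desc = next(plans)            # PlanDescription for this image
for child in plans:                # parsable output (or None) per child image
    pass

if not api_inst.planned_nothingtodo():
    api_inst.prepare()
    api_inst.execute_plan()
api_inst.reset()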
If it's None (the + default), the planning operation will not explicitly recurse + into linked children to perform the requested operation. If this + is a list of linked image children names, the requested + operation will be performed in each of the specified + children. + + 'li_ignore' is either None or a list. If it's None (the + default), the planning operation will attempt to keep all + linked children in sync. If it's an empty list the planning + operation will ignore all children. If this is a list of + linked image children names, those children will be ignored + during the planning operation. If a child is ignored during + the planning phase it will also be skipped during the + preparation and execution phases. + + 'li_parent_sync' if the current image is a child image, this + flag controls whether the linked image parent metadata will be + automatically refreshed. + + 'new_be' indicates whether a new boot environment should be + created during the operation. If True, a new boot environment + will be created. If False, and a new boot environment is + needed, an ImageUpdateOnLiveImageException will be raised. + If None, a new boot environment will be created only if needed. + + 'noexecute' determines whether the resulting plan can be + executed and whether history will be recorded after + planning is finished. + + 'pkgs_inst' is a list of packages to install. + + 'refresh_catalogs' controls whether the catalogs will + automatically be refreshed. + + 'reject_list' is a list of patterns not to be permitted + in solution; installed packages matching these patterns + are removed. + + 'repos' is a list of URI strings or RepositoryURI objects that + represent the locations of additional sources of package data to + use during the planned operation. All API functions called + while a plan is still active will use this package data. + + 'update_index' determines whether client search indexes + will be updated after operation completion during plan + execution.""" + + # certain parameters must be specified + assert pkgs_inst and type(pkgs_inst) == list + + op = API_OP_INSTALL + return self.__plan_op( + op, + _act_timeout=act_timeout, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_activate=be_activate, + _be_name=be_name, + _li_erecurse=li_erecurse, + _li_ignore=li_ignore, + _li_parent_sync=li_parent_sync, + _new_be=new_be, + _noexecute=noexecute, + _pubcheck=pubcheck, + _refresh_catalogs=refresh_catalogs, + _repos=repos, + _update_index=update_index, + pkgs_inst=pkgs_inst, + reject_list=reject_list, + ) + + def gen_plan_exact_install( + self, + pkgs_inst, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + li_ignore=None, + li_parent_sync=True, + new_be=False, + noexecute=False, + refresh_catalogs=True, + reject_list=misc.EmptyI, + repos=None, + update_index=True, + ): + """This is a generator function that yields a PlanDescription + object. If parsable_version is set, it also yields dictionaries + containing plan information for child images. + + Constructs a plan to install exactly the packages provided in + pkgs_inst. Once an operation has been planned, it may be + executed by first calling prepare(), and then execute_plan(). + After execution of a plan, or to abandon a plan, reset() + should be called. + + Callers should pass all arguments by name assignment and + not by positional order. + + 'pkgs_inst' is a list of packages to install exactly. 
+ + For all other parameters, refer to 'gen_plan_install' + for an explanation of their usage and effects.""" + + # certain parameters must be specified + assert pkgs_inst and type(pkgs_inst) == list + + op = API_OP_EXACT_INSTALL + return self.__plan_op( + op, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_activate=be_activate, + _be_name=be_name, + _li_ignore=li_ignore, + _li_parent_sync=li_parent_sync, + _new_be=new_be, + _noexecute=noexecute, + _refresh_catalogs=refresh_catalogs, + _repos=repos, + _update_index=update_index, + pkgs_inst=pkgs_inst, + reject_list=reject_list, + ) + + def gen_plan_sync( + self, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + li_ignore=None, + li_md_only=False, + li_parent_sync=True, + li_pkg_updates=True, + new_be=False, + noexecute=False, + pubcheck=True, + refresh_catalogs=True, + reject_list=misc.EmptyI, + repos=None, + update_index=True, + ): + """This is a generator function that yields a PlanDescription + object. If parsable_version is set, it also yields dictionaries + containing plan information for child images. + + Constructs a plan to sync the current image with its + linked image constraints. Once an operation has been planned, + it may be executed by first calling prepare(), and then + execute_plan(). After execution of a plan, or to abandon a + plan, reset() should be called. + + Callers should pass all arguments by name assignment and + not by positional order. + + 'li_md_only' don't actually modify any packages in the current + images, only sync the linked image metadata from the parent + image. If this options is True, 'li_parent_sync' must also be + True. + + 'li_pkg_updates' when planning a sync operation, allow updates + to packages other than the constraints package. If this + option is False, planning a sync will fail if any packages + (other than the constraints package) need updating to bring + the image in sync with its parent. + + For all other parameters, refer to 'gen_plan_install' and + 'gen_plan_update' for an explanation of their usage and + effects.""" + + # we should only be invoked on a child image. + if not self.ischild(): + raise apx.LinkedImageException(self_not_child=self._img_path) + + op = API_OP_SYNC + return self.__plan_op( + op, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_activate=be_activate, + _be_name=be_name, + _li_ignore=li_ignore, + _li_md_only=li_md_only, + _li_parent_sync=li_parent_sync, + _new_be=new_be, + _noexecute=noexecute, + _pubcheck=pubcheck, + _refresh_catalogs=refresh_catalogs, + _repos=repos, + _update_index=update_index, + li_pkg_updates=li_pkg_updates, + reject_list=reject_list, + ) + + def gen_plan_attach( + self, + lin, + li_path, + allow_relink=False, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + force=False, + li_ignore=None, + li_md_only=False, + li_pkg_updates=True, + li_props=None, + new_be=False, + noexecute=False, + refresh_catalogs=True, + reject_list=misc.EmptyI, + repos=None, + update_index=True, + ): + """This is a generator function that yields a PlanDescription + object. If parsable_version is set, it also yields dictionaries + containing plan information for child images. + + Attach a parent image and sync the packages in the current + image with the new parent. Once an operation has been + planned, it may be executed by first calling prepare(), and + then execute_plan(). After execution of a plan, or to abandon + a plan, reset() should be called. 
+ + Callers should pass all arguments by name assignment and + not by positional order. + + 'lin' a LinkedImageName object that is a name for the current + image. + + 'li_path' a path to the parent image. + + 'allow_relink' allows re-linking of an image that is already a + linked image child. If this option is True we'll overwrite + all existing linked image metadata. + + 'li_props' optional linked image properties to apply to the + child image. + + For all other parameters, refer to the 'gen_plan_install' and + 'gen_plan_sync' functions for an explanation of their usage + and effects.""" + + if li_props is None: + li_props = dict() + + op = API_OP_ATTACH + ad_kwargs = { + "allow_relink": allow_relink, + "force": force, + "lin": lin, + "path": li_path, + "props": li_props, + } + return self.__plan_op( + op, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_activate=be_activate, + _be_name=be_name, + _li_ignore=li_ignore, + _li_md_only=li_md_only, + _new_be=new_be, + _noexecute=noexecute, + _refresh_catalogs=refresh_catalogs, + _repos=repos, + _update_index=update_index, + _ad_kwargs=ad_kwargs, + li_pkg_updates=li_pkg_updates, + reject_list=reject_list, + ) + + def gen_plan_detach( + self, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + force=False, + li_ignore=None, + li_md_only=False, + li_pkg_updates=True, + new_be=False, + noexecute=False, + ): + """This is a generator function that yields a PlanDescription + object. If parsable_version is set, it also yields dictionaries + containing plan information for child images. + + Detach from a parent image and remove any constraints + package from this image. Once an operation has been planned, + it may be executed by first calling prepare(), and then + execute_plan(). After execution of a plan, or to abandon a + plan, reset() should be called. + + Callers should pass all arguments by name assignment and + not by positional order. + + For all other parameters, refer to the 'gen_plan_install' and + 'gen_plan_sync' functions for an explanation of their usage + and effects.""" + + op = API_OP_DETACH + ad_kwargs = {"force": force} + return self.__plan_op( + op, + _ad_kwargs=ad_kwargs, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_activate=be_activate, + _be_name=be_name, + _li_ignore=li_ignore, + _li_md_only=li_md_only, + _new_be=new_be, + _noexecute=noexecute, + _refresh_catalogs=False, + _update_index=False, + li_pkg_updates=li_pkg_updates, + ) + + def plan_uninstall( + self, + pkg_list, + noexecute=False, + update_index=True, + be_name=None, + new_be=False, + be_activate=True, + ): + """DEPRECATED. use gen_plan_uninstall().""" + for pd in self.gen_plan_uninstall( + pkgs_to_uninstall=pkg_list, + noexecute=noexecute, + update_index=update_index, + be_name=be_name, + new_be=new_be, + be_activate=be_activate, + ): + continue + return not self.planned_nothingtodo() + + def gen_plan_uninstall( + self, + pkgs_to_uninstall, + act_timeout=0, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + ignore_missing=False, + li_ignore=None, + li_parent_sync=True, + li_erecurse=None, + new_be=False, + noexecute=False, + pubcheck=True, + update_index=True, + ): + """This is a generator function that yields a PlanDescription + object. If parsable_version is set, it also yields dictionaries + containing plan information for child images. + + Constructs a plan to remove the packages provided in + pkgs_to_uninstall. 
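For the detach side documented above, a comparable sketch (same assumptions about api_inst):

    def detach_from_parent(api_inst, force=False):
        # Plan removal of the parent link and of any constraints package.
        for pd in api_inst.gen_plan_detach(force=force, noexecute=False):
            continue
        if not api_inst.planned_nothingtodo():
            api_inst.prepare()
            api_inst.execute_plan()
        api_inst.reset()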
Once an operation has been planned, it may + be executed by first calling prepare(), and then + execute_plan(). After execution of a plan, or to abandon a + plan, reset() should be called. + + Callers should pass all arguments by name assignment and + not by positional order. + + 'ignore_missing' indicates whether uninstall should ignore + packages which are not installed. + + 'pkgs_to_uninstall' is a list of packages to uninstall. + + For all other parameters, refer to the 'gen_plan_install' + function for an explanation of their usage and effects.""" + + # certain parameters must be specified + assert pkgs_to_uninstall and type(pkgs_to_uninstall) == list + + op = API_OP_UNINSTALL + return self.__plan_op( + op, + _act_timeout=act_timeout, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_activate=be_activate, + _be_name=be_name, + _li_erecurse=li_erecurse, + _li_ignore=li_ignore, + _li_parent_sync=li_parent_sync, + _new_be=new_be, + _noexecute=noexecute, + _pubcheck=pubcheck, + _refresh_catalogs=False, + _update_index=update_index, + ignore_missing=ignore_missing, + pkgs_to_uninstall=pkgs_to_uninstall, + ) + + def gen_plan_set_mediators( + self, + mediators, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + li_ignore=None, + li_parent_sync=True, + new_be=None, + noexecute=False, + update_index=True, + ): + """This is a generator function that yields a PlanDescription + object. If parsable_version is set, it also yields dictionaries + containing plan information for child images. + + Creates a plan to change the version and implementation values + for mediators as specified in the provided dictionary. Once an + operation has been planned, it may be executed by first calling + prepare(), and then execute_plan(). After execution of a plan, + or to abandon a plan, reset() should be called. + + Callers should pass all arguments by name assignment and not by + positional order. + + 'mediators' is a dict of dicts of the mediators to set version + and implementation for. If the dict for a given mediator-name + is empty, it will be interpreted as a request to revert the + specified mediator to the default, "optimal" mediation. It + should be of the form: + + { + mediator-name: { + "implementation": mediator-implementation-string, + "version": mediator-version-string + } + } + + 'implementation' is an optional string that specifies the + implementation of the mediator for use in addition to or + instead of 'version'. + + 'version' is an optional string that specifies the version + (expressed as a dot-separated sequence of non-negative + integers) of the mediator for use. + + For all other parameters, refer to the 'gen_plan_install' + function for an explanation of their usage and effects.""" + + assert mediators + return self.__plan_op( + API_OP_SET_MEDIATOR, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_activate=be_activate, + _be_name=be_name, + _li_ignore=li_ignore, + _li_parent_sync=li_parent_sync, + mediators=mediators, + _new_be=new_be, + _noexecute=noexecute, + _refresh_catalogs=False, + _update_index=update_index, + ) + + def plan_change_varcets( + self, + variants=None, + facets=None, + noexecute=False, + be_name=None, + new_be=None, + repos=None, + be_activate=True, + ): + """DEPRECATED. 
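The mediators dictionary accepted by gen_plan_set_mediators() has the shape given in its docstring. A small illustrative value (mediator names and versions are placeholders, not taken from this change):

    mediators = {
        # Pin one mediator to an explicit version...
        "mysql": {"version": "8.0"},
        # ...and revert another to the default, "optimal" mediation.
        "python": {},
    }
    for pd in api_inst.gen_plan_set_mediators(mediators):
        continue
    # The same prepare()/execute_plan()/reset() cycle applies afterwards.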
use gen_plan_change_varcets().""" + for pd in self.gen_plan_change_varcets( + variants=variants, + facets=facets, + noexecute=noexecute, + be_name=be_name, + new_be=new_be, + repos=repos, + be_activate=be_activate, + ): + continue + return not self.planned_nothingtodo() + + def gen_plan_change_varcets( + self, + facets=None, + variants=None, + act_timeout=0, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + li_erecurse=None, + li_ignore=None, + li_parent_sync=True, + new_be=None, + noexecute=False, + pubcheck=True, + refresh_catalogs=True, + reject_list=misc.EmptyI, + repos=None, + update_index=True, + ): + """This is a generator function that yields a PlanDescription + object. If parsable_version is set, it also yields dictionaries + containing plan information for child images. + + Creates a plan to change the specified variants and/or + facets for the image. Once an operation has been planned, it + may be executed by first calling prepare(), and then + execute_plan(). After execution of a plan, or to abandon a + plan, reset() should be called. + + Callers should pass all arguments by name assignment and + not by positional order. + + 'facets' is a dict of the facets to change the values of. + + 'variants' is a dict of the variants to change the values of. + + For all other parameters, refer to the 'gen_plan_install' + function for an explanation of their usage and effects.""" + + # An empty facets dictionary is allowed because that's how to + # unset all set facets. + if not variants and facets is None: + raise ValueError("Nothing to do") + + invalid_names = [] + if variants: + op = API_OP_CHANGE_VARIANT + # Check whether '*' or '?' is in the input. Currently, + # change-variant does not accept globbing. Also check + # for whitespaces. + for variant in variants: + if "*" in variant or "?" in variant: + raise apx.UnsupportedVariantGlobbing() + if not misc.valid_varcet_name(variant): + invalid_names.append(variant) + else: + op = API_OP_CHANGE_FACET + for facet in facets: + # Explict check for not None so that we can fix + # a broken system from the past by clearing + # the facet. Neither True of False should be + # allowed for this special facet. + if ( + facet == "facet.version-lock.*" + and facets[facet] is not None + ): + raise apx.UnsupportedFacetChange(facet, facets[facet]) + if not misc.valid_varcet_name(facet): + invalid_names.append(facet) + if invalid_names: + raise apx.InvalidVarcetNames(invalid_names) + + return self.__plan_op( + op, + _act_timeout=act_timeout, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_activate=be_activate, + _be_name=be_name, + _li_erecurse=li_erecurse, + _li_ignore=li_ignore, + _li_parent_sync=li_parent_sync, + _new_be=new_be, + _noexecute=noexecute, + _pubcheck=pubcheck, + _refresh_catalogs=refresh_catalogs, + _repos=repos, + _update_index=update_index, + facets=facets, + variants=variants, + reject_list=reject_list, + ) + + def plan_revert( + self, + args, + tagged=False, + noexecute=True, + be_name=None, + new_be=None, + be_activate=True, + ): + """DEPRECATED. 
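A minimal facet change via gen_plan_change_varcets(), with a placeholder facet name and the usual api_inst assumption:

    # Turn off delivery of man pages for this image.
    for pd in api_inst.gen_plan_change_varcets(facets={"facet.doc.man": False}):
        continue
    # Follow with prepare()/execute_plan()/reset() as usual.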
use gen_plan_revert().""" + for pd in self.gen_plan_revert( + args=args, + tagged=tagged, + noexecute=noexecute, + be_name=be_name, + new_be=new_be, + be_activate=be_activate, + ): + continue + return not self.planned_nothingtodo() + + def gen_plan_revert( + self, + args, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + new_be=None, + noexecute=True, + tagged=False, + ): + """This is a generator function that yields a PlanDescription + object. If parsable_version is set, it also yields dictionaries + containing plan information for child images. + + Plan to revert either files or all files tagged with + specified values. Args contains either path names or tag + names to be reverted, tagged is True if args contains tags. + Once an operation has been planned, it may be executed by + first calling prepare(), and then execute_plan(). After + execution of a plan, or to abandon a plan, reset() should be + called. + + For all other parameters, refer to the 'gen_plan_install' + function for an explanation of their usage and effects.""" + + op = API_OP_REVERT + return self.__plan_op( + op, + _be_activate=be_activate, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_name=be_name, + _li_ignore=[], + _new_be=new_be, + _noexecute=noexecute, + _refresh_catalogs=False, + _update_index=False, + args=args, + tagged=tagged, + ) + + def gen_plan_dehydrate(self, publishers=None, noexecute=True): + """This is a generator function that yields a PlanDescription + object. + + Plan to remove non-editable files and hardlinks from an image. + Once an operation has been planned, it may be executed by + first calling prepare(), and then execute_plan(). After + execution of a plan, or to abandon a plan, reset() should be + called. + + 'publishers' is a list of publishers to dehydrate. + + For all other parameters, refer to the 'gen_plan_install' + function for an explanation of their usage and effects.""" + + op = API_OP_DEHYDRATE + return self.__plan_op( + op, + _noexecute=noexecute, + _refresh_catalogs=False, + _update_index=False, + publishers=publishers, + ) + + def gen_plan_rehydrate(self, publishers=None, noexecute=True): + """This is a generator function that yields a PlanDescription + object. + + Plan to reinstall non-editable files and hardlinks to a dehydrated + image. Once an operation has been planned, it may be executed by + first calling prepare(), and then execute_plan(). After + execution of a plan, or to abandon a plan, reset() should be + called. + + 'publishers' is a list of publishers to dehydrate on. + + For all other parameters, refer to the 'gen_plan_install' + function for an explanation of their usage and effects.""" + + op = API_OP_REHYDRATE + return self.__plan_op( + op, + _noexecute=noexecute, + _refresh_catalogs=False, + _update_index=False, + publishers=publishers, + ) + + def gen_plan_verify( + self, + args, + noexecute=True, + unpackaged=False, + unpackaged_only=False, + verify_paths=misc.EmptyI, + ): + """This is a generator function that yields a PlanDescription + object. + + Plan to repair anything that fails to verify. Once an operation + has been planned, it may be executed by first calling prepare(), + and then execute_plan(). After execution of a plan, or to + abandon a plan, reset() should be called. 
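A sketch of reverting a single packaged file with gen_plan_revert(); the path is a placeholder, and note that noexecute defaults to True for this planner:

    for pd in api_inst.gen_plan_revert(
        ["/etc/motd"], tagged=False, noexecute=False
    ):
        continue
    if not api_inst.planned_nothingtodo():
        api_inst.prepare()
        api_inst.execute_plan()
    api_inst.reset()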
+ + For parameters, refer to the 'gen_plan_install' + function for an explanation of their usage and effects.""" + + op = API_OP_VERIFY + return self.__plan_op( + op, + args=args, + _noexecute=noexecute, + _refresh_catalogs=False, + _update_index=False, + _new_be=None, + unpackaged=unpackaged, + unpackaged_only=unpackaged_only, + verify_paths=verify_paths, + ) + + def gen_plan_fix( + self, + args, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + new_be=None, + noexecute=True, + unpackaged=False, + ): + """This is a generator function that yields a PlanDescription + object. + + Plan to repair anything that fails to verify. Once an operation + has been planned, it may be executed by first calling prepare(), + and then execute_plan(). After execution of a plan, or to + abandon a plan, reset() should be called. + + For parameters, refer to the 'gen_plan_install' + function for an explanation of their usage and effects.""" + + op = API_OP_FIX + return self.__plan_op( + op, + args=args, + _be_activate=be_activate, + _backup_be=backup_be, + _backup_be_name=backup_be_name, + _be_name=be_name, + _new_be=new_be, + _noexecute=noexecute, + _refresh_catalogs=False, + _update_index=False, + unpackaged=unpackaged, + ) + + def attach_linked_child( + self, + lin, + li_path, + li_props=None, + accept=False, + allow_relink=False, + force=False, + li_md_only=False, + li_pkg_updates=True, + noexecute=False, + refresh_catalogs=True, + reject_list=misc.EmptyI, + show_licenses=False, + update_index=True, + ): + """Attach an image as a child to the current image (the + current image will become a parent image. This operation + results in attempting to sync the child image with the parent + image. + + 'lin' is the name of the child image + + 'li_path' is the path to the child image + + 'li_props' optional linked image properties to apply to the + child image. + + 'allow_relink' indicates whether we should allow linking of a + child image that is already linked (the child may already + be a child or a parent image). + + 'force' indicates whether we should allow linking of a child + image even if the specified linked image type doesn't support + attaching of children. + + 'li_md_only' indicates whether we should only update linked + image metadata and not actually try to sync the child image. + + 'li_pkg_updates' indicates whether we should disallow pkg + updates during the child image sync. + + 'noexecute' indicates if we should actually make any changes + rather or just simulate the operation. + + 'refresh_catalogs' controls whether the catalogs will + automatically be refreshed. + + 'reject_list' is a list of patterns not to be permitted + in solution; installed packages matching these patterns + are removed. + + 'update_index' determines whether client search indexes will + be updated in the child after the sync operation completes. 
+ + This function returns a tuple of the format (rv, err) where rv + is a pkg.client.pkgdefs return value and if an error was + encountered err is an exception object which describes the + error.""" + + return self._img.linked.attach_child( + lin, + li_path, + li_props, + accept=accept, + allow_relink=allow_relink, + force=force, + li_md_only=li_md_only, + li_pkg_updates=li_pkg_updates, + noexecute=noexecute, + progtrack=self.__progresstracker, + refresh_catalogs=refresh_catalogs, + reject_list=reject_list, + show_licenses=show_licenses, + update_index=update_index, + ) + + def detach_linked_children( + self, + li_list, + force=False, + li_md_only=False, + li_pkg_updates=True, + noexecute=False, + ): + """Detach one or more children from the current image. This + operation results in the removal of any constraint package + from the child images. + + 'li_list' a list of linked image name objects which specified + which children to operate on. If the list is empty then we + operate on all children. + + For all other parameters, refer to the 'attach_linked_child' + function for an explanation of their usage and effects. + + This function returns a dictionary where the keys are linked + image name objects and the values are the result of the + specified operation on the associated child image. The result + is a tuple of the format (rv, err) where rv is a + pkg.client.pkgdefs return value and if an error was + encountered err is an exception object which describes the + error.""" + + return self._img.linked.detach_children( + li_list, + force=force, + li_md_only=li_md_only, + li_pkg_updates=li_pkg_updates, + noexecute=noexecute, + ) + + def detach_linked_rvdict2rv(self, rvdict): + """Convenience function that takes a dictionary returned from + an operations on multiple children and merges the results into + a single return code.""" + + return self._img.linked.detach_rvdict2rv(rvdict) + + def sync_linked_children( + self, + li_list, + accept=False, + li_md_only=False, + li_pkg_updates=True, + noexecute=False, + refresh_catalogs=True, + show_licenses=False, + update_index=True, + ): + """Sync one or more children of the current image. + + For all other parameters, refer to the 'attach_linked_child' + and 'detach_linked_children' functions for an explanation of + their usage and effects. + + For a description of the return value, refer to the + 'detach_linked_children' function.""" + + rvdict = self._img.linked.sync_children( + li_list, + accept=accept, + li_md_only=li_md_only, + li_pkg_updates=li_pkg_updates, + noexecute=noexecute, + progtrack=self.__progresstracker, + refresh_catalogs=refresh_catalogs, + show_licenses=show_licenses, + update_index=update_index, + ) + return rvdict + + def sync_linked_rvdict2rv(self, rvdict): + """Convenience function that takes a dictionary returned from + an operations on multiple children and merges the results into + a single return code.""" + + return self._img.linked.sync_rvdict2rv(rvdict) + + def audit_linked_children(self, li_list): + """Audit one or more children of the current image to see if + they are in sync with this image. + + For all parameters, refer to the 'detach_linked_children' + functions for an explanation of their usage and effects. 
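Putting the child-image helpers above together, a hedged sketch of syncing every child and collapsing the per-child results (an empty list is documented, for detach_linked_children(), as selecting all children):

    # rvdict maps linked image names to per-child (rv, err) results;
    # sync_linked_rvdict2rv() merges them into a single return code.
    rvdict = api_inst.sync_linked_children([], noexecute=False)
    rv = api_inst.sync_linked_rvdict2rv(rvdict)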
+ + For a description of the return value, refer to the + 'detach_linked_children' function.""" + + rvdict = self._img.linked.audit_children(li_list) + return rvdict + + def audit_linked_rvdict2rv(self, rvdict): + """Convenience function that takes a dictionary returned from + an operations on multiple children and merges the results into + a single return code.""" + + return self._img.linked.audit_rvdict2rv(rvdict) + + def audit_linked(self, li_parent_sync=True): + """If the current image is a child image, this function + audits the current image to see if it's in sync with it's + parent. + + For a description of the return value, refer to the + 'detach_linked_children' function.""" + + lin = self._img.linked.child_name + rvdict = {} + + if li_parent_sync: + # refresh linked image data from parent image. + rvdict[lin] = self._img.linked.syncmd_from_parent( + catch_exception=True + ) + if rvdict[lin] is not None: + return rvdict - VARIANT_IMAGE_POSSIBLE - Return possible variant values (those found in - any installed package) for only the variants set - in the image. + rvdict[lin] = self._img.linked.audit_self() + return rvdict - VARIANT_INSTALLED - Return only the variants listed in installed - packages. + def ischild(self): + """Indicates whether the current image is a child image.""" + return self._img.linked.ischild() - VARIANT_INSTALLED_POSSIBLE - Return possible variant values (those found in - any installed package) for only the variants - listed in installed packages. + def isparent(self, li_ignore=None): + """Indicates whether the current image is a parent image.""" + return self._img.linked.isparent(li_ignore) - 'implicit' is a boolean indicating whether variants specified in - the 'patterns' parameter that are not explicitly set in the - image or found in a package should be included. Ignored for - VARIANT_INSTALLED* cases. + @staticmethod + def __utc_format(time_str, utc_now): + """Given a local time value string, formatted with + "%Y-%m-%dT%H:%M:%S, return a UTC representation of that value, + formatted with %Y%m%dT%H%M%SZ. This raises a ValueError if the + time was incorrectly formatted. If the time_str is "now", it + returns the value of utc_now""" - 'patterns' is an optional list of variant wildcard strings to - filter results by.""" + if time_str == "now": + return utc_now - variants = self._img.cfg.variants - if variant_list != self.VARIANT_INSTALLED and \ - variant_list != self.VARIANT_INSTALLED_POSSIBLE: - # Include all variants set in image. - vimg = set(variants.keys()) + try: + local_dt = datetime.datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%S") + secs = time.mktime(local_dt.timetuple()) + utc_dt = datetime.datetime.utcfromtimestamp(secs) + return utc_dt.strftime("%Y%m%dT%H%M%SZ") + except ValueError as e: + raise apx.HistoryRequestException(e) + + def __get_history_paths(self, time_val, utc_now): + """Given a local timestamp, either as a discrete value, or a + range of values, formatted as '-', and a + path to find history xml files, return an array of paths that + match that timestamp. 
utc_now is the current time expressed in + UTC""" + + files = [] + if len(time_val) > 20 or time_val.startswith("now-"): + if time_val.startswith("now-"): + start = utc_now + finish = self.__utc_format(time_val[4:], utc_now) + else: + # our ranges are 19 chars of timestamp, a '-', + # and another timestamp + start = self.__utc_format(time_val[:19], utc_now) + finish = self.__utc_format(time_val[20:], utc_now) + if start > finish: + raise apx.HistoryRequestException( + _( + "Start " "time must be older than finish time: " "{0}" + ).format(time_val) + ) + files = self.__get_history_range(start, finish) + else: + # there can be multiple event files per timestamp + prefix = self.__utc_format(time_val, utc_now) + files = glob.glob( + os.path.join(self._img.history.path, "{0}*".format(prefix)) + ) + if not files: + raise apx.HistoryRequestException( + _("No history " "entries found for {0}").format(time_val) + ) + return files + + def __get_history_range(self, start, finish): + """Given a start and finish date, formatted as UTC date strings + as per __utc_format(), return a list of history filenames that + fall within that date range. A range of two equal dates is + the equivalent of just retrieving history for that single date + string.""" + + entries = [] + all_entries = sorted(os.listdir(self._img.history.path)) + + for entry in all_entries: + # our timestamps are always 16 character datestamps + basename = os.path.basename(entry)[:16] + if basename >= start: + if basename > finish: + # we can stop looking now. + break + entries.append(entry) + return entries + + def gen_history(self, limit=None, times=misc.EmptyI): + """A generator function that returns History objects up to the + limit specified matching the times specified. + + 'limit' is an optional integer value specifying the maximum + number of entries to return. + + 'times' is a list of timestamp or timestamp range strings to + restrict the returned entries to.""" + + # Make entries a set to cope with multiple overlapping ranges or + # times. + entries = set() + + utc_now = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%SZ") + for time_val in times: + # Ranges are 19 chars of timestamp, a '-', and + # another timestamp. + if len(time_val) > 20 or time_val.startswith("now-"): + if time_val.startswith("now-"): + start = utc_now + finish = self.__utc_format(time_val[4:], utc_now) else: - # Don't include any set only in image. - vimg = set() - - # Get all variants found in packages and determine state. - vpkg = {} - excludes = self._img.list_excludes() - vposs = collections.defaultdict(set) - if variant_list != self.VARIANT_IMAGE: - # Only incur the overhead of reading through all - # installed packages if not just listing variants set in - # image or listing possible values for them. - for f in self._img.gen_installed_pkgs(): - # The manifest must be loaded without - # pre-applying excludes so that gen_variants() - # can choose how to filter the actions. - mfst = self._img.get_manifest(f, - ignore_excludes=True) - for variant, vals in mfst.gen_variants( - excludes=excludes): - if variant not in vimg: - # Although rare, packages with - # unknown variants (those not - # set in the image) can be - # installed as long as content - # does not conflict. For those - # variants, return None. This - # is done without using get() as - # that would cause None to be - # returned for implicitly set - # variants (e.g. debug). 
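The gen_history() interface shown above accepts either single timestamps or ranges in its 'times' list. A small illustrative call, with placeholder timestamps:

    # Last five entries, then an explicit local-time range formatted as
    # %Y-%m-%dT%H:%M:%S-%Y-%m-%dT%H:%M:%S (single timestamps and
    # "now"-anchored values are also accepted, per the docstrings above).
    recent = list(api_inst.gen_history(limit=5))
    ranged = list(
        api_inst.gen_history(
            times=["2024-01-01T00:00:00-2024-06-30T23:59:59"]
        )
    )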
- try: - vpkg[variant] = \ - variants[variant] - except KeyError: - vpkg[variant] = None - - if (variant_list == \ - self.VARIANT_ALL_POSSIBLE or - variant_list == \ - self.VARIANT_IMAGE_POSSIBLE or - variant_list == \ - self.VARIANT_INSTALLED_POSSIBLE): - # Build possible list of variant - # values. - vposs[variant].update(set(vals)) - - # If caller wants implicit values, include non-glob debug - # patterns (even if not found) in results unless only installed - # variants were requested. - iset = set() - if implicit and variant_list != self.VARIANT_INSTALLED and \ - variant_list != self.VARIANT_INSTALLED_POSSIBLE: - # Normalize patterns. - iset = set( - p.startswith("variant.") and p or ("variant." + p) - for p in patterns - if "*" not in p and "?" not in p - ) - # Only debug variants can have an implicit value. - iset = set( - p - for p in iset - if p.startswith("variant.debug.") - ) - vlist = sorted(vimg | set(vpkg.keys()) | iset) + start = self.__utc_format(time_val[:19], utc_now) + finish = self.__utc_format(time_val[20:], utc_now) + if start > finish: + raise apx.HistoryRequestException( + _( + "Start time must be older than " "finish time: {0}" + ).format(time_val) + ) + files = self.__get_history_range(start, finish) + else: + # There can be multiple entries per timestamp. + prefix = self.__utc_format(time_val, utc_now) + files = glob.glob( + os.path.join(self._img.history.path, "{0}*".format(prefix)) + ) + + try: + files = self.__get_history_paths(time_val, utc_now) + entries.update(files) + except ValueError: + raise apx.HistoryRequestException( + _( + "Invalid " + "time format '{0}'. Please use " + "%Y-%m-%dT%H:%M:%S or\n" + "%Y-%m-%dT%H:%M:%S-" + "%Y-%m-%dT%H:%M:%S" + ).format(time_val) + ) + + if not times: + try: + entries = os.listdir(self._img.history.path) + except EnvironmentError as e: + if e.errno == errno.ENOENT: + # No history to list. + return + raise apx._convert_error(e) + + entries = sorted(entries) + if limit: + limit *= -1 + entries = entries[limit:] - # Generate the results. - for name in misc.yield_matching("variant.", vlist, patterns): - try: - yield (name, vpkg[name], sorted(vposs[name])) - except KeyError: - yield (name, variants[name], - sorted(vposs[name])) + try: + uuid_be_dic = bootenv.BootEnv.get_uuid_be_dic() + except apx.ApiException as e: + uuid_be_dic = {} + + for entry in entries: + # Yield each history entry object as it is loaded. + try: + yield history.History( + root_dir=self._img.history.root_dir, + filename=entry, + uuid_be_dic=uuid_be_dic, + ) + except apx.HistoryLoadException as e: + if e.parse_failure: + # Ignore corrupt entries. + continue + raise - def freeze_pkgs(self, fmri_strings, dry_run=False, comment=None, - unfreeze=False): - """Freeze/Unfreeze one or more packages.""" + def get_linked_name(self): + """If the current image is a child image, this function + returns a linked image name object which represents the name + of the current image.""" + return self._img.linked.child_name - # Comment is only a valid parameter if a freeze is happening. - assert not comment or not unfreeze + def get_linked_props(self, lin=None): + """Return a dictionary which represents the linked image + properties associated with a linked image. 
- self._acquire_activity_lock() - try: - if unfreeze: - return self._img.unfreeze_pkgs(fmri_strings, - progtrack=self.__progresstracker, - check_cancel=self.__check_cancel, - dry_run=dry_run) - else: - return self._img.freeze_pkgs(fmri_strings, - progtrack=self.__progresstracker, - check_cancel=self.__check_cancel, - dry_run=dry_run, comment=comment) - finally: - self._activity_lock.release() + 'lin' is the name of the child image. If lin is None then + the current image is assumed to be a linked image and it's + properties are returned.""" - def get_frozen_list(self): - """Return list of tuples of (pkg fmri, reason package was - frozen, timestamp when package was frozen).""" + return self._img.linked.child_props(lin=lin) - return self._img.get_frozen_list() + def list_linked(self, li_ignore=None): + """Returns a list of linked images associated with the + current image. This includes both child and parent images. - def cleanup_cached_content(self, verbose=False): - """Clean up any cached content.""" + For all parameters, refer to the 'gen_plan_install' function + for an explanation of their usage and effects. - self._acquire_activity_lock() - try: - return self._img.cleanup_cached_content( - progtrack=self.__progresstracker, force=True, - verbose=verbose) - finally: - self._activity_lock.release() + The returned value is a list of tuples where each tuple + contains (
<lin>, <relationship>, <path>
  • ).""" - def __plan_common_exception(self, log_op_end_all=False): - """Deal with exceptions that can occur while planning an - operation. Any exceptions generated here are passed - onto the calling context. By default all exceptions - will result in a call to self.log_operation_end() before - they are passed onto the calling context.""" + return self._img.linked.list_related(li_ignore=li_ignore) - exc_type, exc_value, exc_traceback = sys.exc_info() + def parse_linked_name(self, li_name, allow_unknown=False): + """Given a string representing a linked image child name, + returns linked image name object representing the same name. - if exc_type == apx.PlanCreationException: - self.__set_history_PlanCreationException(exc_value) - elif exc_type == apx.CanceledException: - self._cancel_done() - elif exc_type == apx.ConflictingActionErrors: - self.log_operation_end(error=str(exc_value), - result=RESULT_CONFLICTING_ACTIONS) - elif exc_type in [ - apx.IpkgOutOfDateException, - fmri.IllegalFmri]: - self.log_operation_end(error=exc_value) - elif log_op_end_all: - self.log_operation_end(error=exc_value) - - if exc_type not in (apx.ImageLockedError, - apx.ImageLockingFailedError): - # Must be called before reset_unlock, and only if - # the exception was not a locked error. - self._img.unlock() + 'allow_unknown' indicates whether the name must represent + actual children or simply be syntactically correct.""" - try: - if int(os.environ.get("PKG_DUMP_STATS", 0)) > 0: - self._img.transport.stats.dump() - except ValueError: - # Don't generate stats if an invalid value - # is supplied. - pass - - # In the case of duplicate actions, we want to save off the plan - # description for display to the client (if they requested it), - # as once the solver's done its job, there's interesting - # information in the plan. We have to save it here and restore - # it later because __reset_unlock() torches it. - if exc_type == apx.ConflictingActionErrors: - self._img.imageplan.set_be_options(self.__backup_be, - self.__backup_be_name, self.__new_be, - self.__be_activate, self.__be_name) - plan_desc = self._img.imageplan.describe() + return self._img.linked.parse_name(li_name, allow_unknown) - self.__reset_unlock() + def parse_linked_name_list(self, li_name_list, allow_unknown=False): + """Given a list of strings representing linked image child + names, returns a list of linked image name objects + representing the same names. - if exc_type == apx.ConflictingActionErrors: - self.__plan_desc = plan_desc - - self._activity_lock.release() - - # re-raise the original exception. (we have to explicitly - # restate the original exception since we may have cleared the - # current exception scope above.) - six.reraise(exc_type, exc_value, exc_traceback) - - def solaris_image(self): - """Returns True if the current image is a solaris image, or an - image which contains the pkg(7) packaging system.""" - - # First check to see if the special package "release/name" - # exists and contains metadata saying this is Solaris. - results = self.__get_pkg_list(self.LIST_INSTALLED, - patterns=["release/name"], return_fmris=True) - results = [e for e in results] - if results: - pfmri, summary, categories, states, attrs = results[0] - mfst = self._img.get_manifest(pfmri) - osname = mfst.get("pkg.release.osname", None) - if osname == "sunos": - return True - - # Otherwise, see if we can find package/pkg (or SUNWipkg) and - # system/core-os (or SUNWcs). 
- results = self.__get_pkg_list(self.LIST_INSTALLED, - patterns=["/package/pkg", "SUNWipkg", "/system/core-os", - "SUNWcs"]) - installed = set(e[0][1] for e in results) - if ("SUNWcs" in installed or "system/core-os" in installed) and \ - ("SUNWipkg" in installed or "package/pkg" in installed): - return True + For all other parameters, refer to the 'parse_linked_name' + function for an explanation of their usage and effects.""" - return False + return [ + self.parse_linked_name(li_name, allow_unknown) + for li_name in li_name_list + ] - def __ipkg_require_latest(self, noexecute): - """Raises an IpkgOutOfDateException if the current image - contains the pkg(7) packaging system and a newer version - of the pkg(7) packaging system is installable.""" + def describe(self): + """Returns None if no plan is ready yet, otherwise returns + a PlanDescription.""" - if not self.solaris_image(): - return + return self.__plan_desc - # Get old purpose in order to be able to restore it on return. - p = self.__progresstracker.get_purpose() + def prepare(self): + """Takes care of things which must be done before the plan can + be executed. This includes downloading the packages to disk and + preparing the indexes to be updated during execution. Should + only be called once a gen_plan_*() method has been called. If + a plan is abandoned after calling this method, reset() should + be called.""" - try: - # - # Let progress tracker know that subsequent callbacks - # into it will all be in service of update checking. - # Note that even though this might return, the - # finally: will still reset the purpose. - # - self.__progresstracker.set_purpose( - self.__progresstracker.PURPOSE_PKG_UPDATE_CHK) - if self._img.ipkg_is_up_to_date( - self.__check_cancel, noexecute, - refresh_allowed=False, - progtrack=self.__progresstracker): - return - except apx.ImageNotFoundException: - # Can't do anything in this - # case; so proceed. - return - finally: - self.__progresstracker.set_purpose(p) - - raise apx.IpkgOutOfDateException() - - def __verify_args(self, args): - """Verifies arguments passed into the API. 
- It tests for correct data types of the input args, verifies that - passed in FMRIs are valid, checks if repository URIs are valid - and does some logical tests for the combination of arguments.""" - - arg_types = { - # arg name type nullable - "_act_timeout": (int, False), - "_be_activate": ('activate', False), - "_be_name": (six.string_types, True), - "_backup_be": (bool, True), - "_backup_be_name": (six.string_types, True), - "_ignore_missing": (bool, False), - "_ipkg_require_latest": (bool, False), - "_li_erecurse": (iter, True), - "_li_ignore": (iter, True), - "_li_md_only": (bool, False), - "_li_parent_sync": (bool, False), - "_new_be": (bool, True), - "_noexecute": (bool, False), - "_pubcheck": (bool, False), - "_refresh_catalogs": (bool, False), - "_repos": (iter, True), - "_update_index": (bool, False), - "facets": (dict, True), - "mediators": (iter, True), - "pkgs_inst": (iter, True), - "pkgs_to_uninstall": (iter, True), - "pkgs_update": (iter, True), - "reject_list": (iter, True), - "variants": (dict, True), - } - - # merge kwargs into the main arg dict - if "kwargs" in args: - for name, value in args["kwargs"].items(): - args[name] = value - - # check arguments for proper type and nullability - for a in args: - try: - a_type, nullable = arg_types[a] - except KeyError: - # unknown argument passed, ignore - continue + self._acquire_activity_lock() + try: + self._img.lock() + except: + self._activity_lock.release() + raise - assert nullable or args[a] is not None - - if args[a] is not None and a_type == iter: - try: - iter(args[a]) - except TypeError: - raise AssertionError("{0} is not an " - "iterable".format(a)) - elif a_type == 'activate': - assert isinstance(args[a], bool) or ( - isinstance(args[a], str) and - args[a] == 'bootnext') - else: - assert (args[a] is None or - isinstance(args[a], a_type)), "{0} is " \ - "type {1}; expected {2}".format(a, type(a), - a_type) - - # check if passed FMRIs are valid - illegals = [] - for i in ("pkgs_inst", "pkgs_update", "pkgs_to_uninstall", - "reject_list"): - try: - fmris = args[i] - except KeyError: - continue - if fmris is None: - continue - for pat, err, pfmri, matcher in \ - self.parse_fmri_patterns(fmris): - if not err: - continue - else: - illegals.append(fmris) - - if illegals: - raise apx.PlanCreationException(illegal=illegals) - - # some logical checks - errors = [] - if not args["_new_be"] and args["_be_name"]: - errors.append(apx.InvalidOptionError( - apx.InvalidOptionError.REQUIRED, ["_be_name", - "_new_be"])) - if not args["_backup_be"] and args["_backup_be_name"]: - errors.append(apx.InvalidOptionError( - apx.InvalidOptionError.REQUIRED, ["_backup_be_name", - "_backup_be"])) - if args["_backup_be"] and args["_new_be"]: - errors.append(apx.InvalidOptionError( - apx.InvalidOptionError.INCOMPAT, ["_backup_be", - "_new_be"])) - - if errors: - raise apx.InvalidOptionErrors(errors) - - # check if repo URIs are valid - try: - repos = args["_repos"] - except KeyError: - return + try: + if not self._img.imageplan: + raise apx.PlanMissingException() - if not repos: - return + if not self.__planned_children: + # if we never planned children images then we + # didn't finish planning. 
+ raise apx.PlanMissingException() - illegals = [] - for r in repos: - valid = False - if type(r) == publisher.RepositoryURI: - # RepoURI objects pass right away - continue + if self.__prepared: + raise apx.AlreadyPreparedException() - if not misc.valid_pub_url(r): - illegals.append(r) - - if illegals: - raise apx.UnsupportedRepositoryURI(illegals) - - def __plan_op(self, _op, _act_timeout=0, _ad_kwargs=None, - _backup_be=None, _backup_be_name=None, _be_activate=True, - _be_name=None, _ipkg_require_latest=False, _li_ignore=None, - _li_erecurse=None, _li_md_only=False, _li_parent_sync=True, - _new_be=False, _noexecute=False, _pubcheck=True, - _refresh_catalogs=True, _repos=None, _update_index=True, **kwargs): - """Contructs a plan to change the package or linked image - state of an image. - - We can raise PermissionsException, PlanCreationException, - InventoryException, or LinkedImageException. - - Arguments prefixed with '_' are primarily used within this - function. All other arguments must be specified via keyword - assignment and will be passed directly on to the image - interfaces being invoked." - - '_op' is the API operation we will perform. - - '_ad_kwargs' is only used dyring attach or detach and it - is a dictionary of arguments that will be passed to the - linked image attach/detach interfaces. - - '_ipkg_require_latest' enables a check to verify that the - latest installable version of the pkg(7) packaging system is - installed before we proceed with the requested operation. - - For all other '_' prefixed parameters, please refer to the - 'gen_plan_*' functions which invoke this function for an - explanation of their usage and effects. - - This function first yields the plan description for the global - zone, then either a series of dictionaries representing the - parsable output from operating on the child images or a series - of None values.""" - - # sanity checks - assert _op in api_op_values - assert _ad_kwargs is None or \ - _op in [API_OP_ATTACH, API_OP_DETACH] - assert _ad_kwargs != None or \ - _op not in [API_OP_ATTACH, API_OP_DETACH] - assert not _li_md_only or \ - _op in [API_OP_ATTACH, API_OP_DETACH, API_OP_SYNC] - assert not _li_md_only or _li_parent_sync - - self.__verify_args(locals()) - - # make some perf optimizations - if _li_md_only: - _refresh_catalogs = _update_index = False - if _op in [API_OP_DETACH, API_OP_SET_MEDIATOR, API_OP_FIX, - API_OP_VERIFY, API_OP_DEHYDRATE, API_OP_REHYDRATE]: - # these operations don't change fmris and don't need - # to recurse, so disable a bunch of linked image - # operations. - _li_parent_sync = False - _pubcheck = False - _li_ignore = [] # ignore all children - - # All the image interface functions that we invoke have some - # common arguments. Set those up now. - args_common = {} - args_common["op"] = _op - args_common["progtrack"] = self.__progresstracker - args_common["check_cancel"] = self.__check_cancel - args_common["noexecute"] = _noexecute - - # make sure there is no overlap between the common arguments - # supplied to all api interfaces and the arguments that the - # api arguments that caller passed to this function. - assert (set(args_common) & set(kwargs)) == set(), \ - "{0} & {1} != set()".format(str(set(args_common)), - str(set(kwargs))) - kwargs.update(args_common) + self._enable_cancel() - try: - # Lock the current image. 
- self.__plan_common_start(_op, _noexecute, _backup_be, - _backup_be_name, _new_be, _be_name, _be_activate) + try: + self._img.imageplan.preexecute() + except search_errors.ProblematicPermissionsIndexException as e: + raise apx.ProblematicPermissionsIndexException(e) + except: + raise - except: - raise + self._disable_cancel() + self.__prepared = True + except apx.CanceledException as e: + self._cancel_done() + if self._img.history.operation_name: + # If an operation is in progress, log + # the error and mark its end. + self.log_operation_end(error=e) + raise + except Exception as e: + self._cancel_cleanup_exception() + if self._img.history.operation_name: + # If an operation is in progress, log + # the error and mark its end. + self.log_operation_end(error=e) + raise + except: + # Handle exceptions that are not subclasses of + # Exception. + self._cancel_cleanup_exception() + if self._img.history.operation_name: + # If an operation is in progress, log + # the error and mark its end. + exc_type, exc_value, exc_traceback = sys.exc_info() + self.log_operation_end(error=exc_type) + raise + finally: + self._img.cleanup_downloads() + self._img.unlock() + try: + if int(os.environ.get("PKG_DUMP_STATS", 0)) > 0: + self._img.transport.stats.dump() + except ValueError: + # Don't generate stats if an invalid value + # is supplied. + pass + self._activity_lock.release() + + self._img.linked.api_recurse_prepare(self.__progresstracker) + + def execute_plan(self): + """Executes the plan. This is uncancelable once it begins. + Should only be called after the prepare method has been + called. After plan execution, reset() should be called.""" + + self._acquire_activity_lock() + try: + self._disable_cancel() + self._img.lock() + except: + self._activity_lock.release() + raise + try: + if not self._img.imageplan: + raise apx.PlanMissingException() + + if not self.__prepared: + raise apx.PrematureExecutionException() + + if self.__executed: + raise apx.AlreadyExecutedException() + + try: + be = bootenv.BootEnv(self._img, self.__progresstracker) + except RuntimeError: + be = bootenv.BootEnvNull(self._img) + self._img.bootenv = be + + if ( + not self.__new_be + and self._img.imageplan.reboot_needed() + and self._img.is_liveroot() + ): + e = apx.RebootNeededOnLiveImageException() + self.log_operation_end(error=e) + raise e + + # Before proceeding, create a backup boot environment if + # requested. + if self.__backup_be: try: - if _op == API_OP_ATTACH: - self._img.linked.attach_parent(**_ad_kwargs) - elif _op == API_OP_DETACH: - self._img.linked.detach_parent(**_ad_kwargs) - - if _li_parent_sync: - # refresh linked image data from parent image. - self._img.linked.syncmd_from_parent() - - # initialize recursion state - self._img.linked.api_recurse_init( - li_ignore=_li_ignore, repos=_repos) - - if _pubcheck: - # check that linked image pubs are in sync - self.__linked_pubcheck(_op) - - if _refresh_catalogs: - self.__refresh_publishers() - - if _ipkg_require_latest: - # If this is an image update then make - # sure the latest version of the ipkg - # software is installed. 
- self.__ipkg_require_latest(_noexecute) - - self.__set_img_alt_sources(_repos) - - if _li_md_only: - self._img.make_noop_plan(**args_common) - elif _op in [API_OP_ATTACH, API_OP_DETACH, API_OP_SYNC]: - self._img.make_sync_plan(**kwargs) - elif _op in [API_OP_CHANGE_FACET, - API_OP_CHANGE_VARIANT]: - self._img.make_change_varcets_plan(**kwargs) - elif _op == API_OP_DEHYDRATE: - self._img.make_dehydrate_plan(**kwargs) - elif _op == API_OP_INSTALL or \ - _op == API_OP_EXACT_INSTALL: - self._img.make_install_plan(**kwargs) - elif _op in [API_OP_FIX, API_OP_VERIFY]: - self._img.make_fix_plan(**kwargs) - elif _op == API_OP_REHYDRATE: - self._img.make_rehydrate_plan(**kwargs) - elif _op == API_OP_REVERT: - self._img.make_revert_plan(**kwargs) - elif _op == API_OP_SET_MEDIATOR: - self._img.make_set_mediators_plan(**kwargs) - elif _op == API_OP_UNINSTALL: - self._img.make_uninstall_plan(**kwargs) - elif _op == API_OP_UPDATE: - self._img.make_update_plan(**kwargs) - else: - raise RuntimeError( - "Unknown api op: {0}".format(_op)) - - self.__api_op = _op - - if self._img.imageplan.nothingtodo(): - # no package changes mean no index changes - _update_index = False - - self._disable_cancel() - self.__set_be_creation() - self._img.imageplan.set_be_options( - self.__backup_be, self.__backup_be_name, - self.__new_be, self.__be_activate, self.__be_name) - self.__plan_desc = self._img.imageplan.describe() - if not _noexecute: - self.__plan_type = self.__plan_desc.plan_type - - if _act_timeout != 0: - self.__plan_desc.set_actuator_timeout( - _act_timeout) - - # Yield to our caller so they can display our plan - # before we recurse into child images. Drop the - # activity lock before yielding because otherwise the - # caller can't do things like set the displayed - # license state for pkg plans). - self._activity_lock.release() - yield self.__plan_desc - self._activity_lock.acquire() - - # plan operation in child images. This currently yields - # either a dictionary representing the parsable output - # from the child image operation, or None. Eventually - # these will yield plan descriptions objects instead. - - for p_dict in self._img.linked.api_recurse_plan( - api_kwargs=kwargs, erecurse_list=_li_erecurse, - refresh_catalogs=_refresh_catalogs, - update_index=_update_index, - progtrack=self.__progresstracker): - yield p_dict - - self.__planned_children = True - + be.create_backup_be(be_name=self.__backup_be_name) + except Exception as e: + self.log_operation_end(error=e) + raise except: - if _op in [ - API_OP_UPDATE, - API_OP_INSTALL, - API_OP_REVERT, - API_OP_SYNC]: - self.__plan_common_exception( - log_op_end_all=True) - else: - self.__plan_common_exception() - # NOTREACHED - - stuff_to_do = not self.planned_nothingtodo() - - if not stuff_to_do or _noexecute: - self.log_operation_end( - result=RESULT_NOTHING_TO_DO) - - self._img.imageplan.update_index = _update_index - self.__plan_common_finish() - - # Value 'DebugValues' is unsubscriptable; - # pylint: disable=E1136 - if DebugValues["plandesc_validate"]: - # save, load, and get a new json copy of the plan, - # then compare that new copy against our current one. - # this regressions tests the plan save/load code. 
- pd_json1 = self.__plan_desc.getstate(self.__plan_desc, - reset_volatiles=True) - fobj = tempfile.TemporaryFile(mode="w+") - json.dump(pd_json1, fobj) - pd_new = plandesc.PlanDescription(_op) - pd_new._load(fobj) - pd_json2 = pd_new.getstate(pd_new, reset_volatiles=True) - fobj.close() - del fobj, pd_new - pkg.misc.json_diff("PlanDescription", \ - pd_json1, pd_json2, pd_json1, pd_json2) - del pd_json1, pd_json2 - - @_LockedCancelable() - def load_plan(self, plan, prepared=False): - """Load a previously generated PlanDescription.""" - - # Prevent loading a plan if one has been already. - if self.__plan_type is not None: - raise apx.PlanExistsException(self.__plan_type) - - # grab image lock. we don't worry about dropping the image - # lock since __activity_lock will drop it for us us after we - # return (or if we generate an exception). - self._img.lock() - - # load the plan - self.__plan_desc = plan - self.__plan_type = plan.plan_type - self.__planned_children = True - self.__prepared = prepared - - # load BE related plan settings - self.__new_be = plan.new_be - self.__be_activate = plan.activate_be - self.__be_name = plan.be_name - - # sanity check: verify the BE name - if self.__be_name is not None: - self.check_be_name(self.__be_name) - if not self._img.is_liveroot(): - raise apx.BENameGivenOnDeadBE(self.__be_name) - - # sanity check: verify that all the fmris in the plan are in - # the known catalog - pkg_cat = self._img.get_catalog(self._img.IMG_CATALOG_KNOWN) - for pp in plan.pkg_plans: - if pp.destination_fmri: - assert pkg_cat.get_entry(pp.destination_fmri), \ - "fmri part of plan, but currently " \ - "unknown: {0}".format(pp.destination_fmri) - - # allocate an image plan based on the supplied plan - self._img.imageplan = imageplan.ImagePlan(self._img, plan._op, - self.__progresstracker, check_cancel=self.__check_cancel, - pd=plan) - - if prepared: - self._img.imageplan.skip_preexecute() - - # create a history entry - self.log_operation_start(plan.plan_type) - - def __linked_pubcheck(self, api_op=None): - """Private interface to perform publisher check on this image - and its children.""" - - if api_op in [API_OP_DETACH, API_OP_SET_MEDIATOR]: - # we don't need to do a pubcheck for detach or - # changing mediators - return - - # check the current image - self._img.linked.pubcheck() - - # check child images - self._img.linked.api_recurse_pubcheck(self.__progresstracker) - - @_LockedCancelable() - def linked_publisher_check(self): - """If we're a child image, verify that the parent image's - publisher configuration is a subset of the child image's - publisher configuration. If we have any children, recurse - into them and perform a publisher check.""" - - # grab image lock. we don't worry about dropping the image - # lock since __activity_lock will drop it for us us after we - # return (or if we generate an exception). - self._img.lock(allow_unprivileged=True) - - # get ready to recurse - self._img.linked.api_recurse_init() - - # check that linked image pubs are in sync - self.__linked_pubcheck() - - @_LockedCancelable() - def hotfix_origin_cleanup(self): - - # grab image lock. Cleanup is handled by the decorator. 
- self._img.lock(allow_unprivileged=False) - - # prepare for recursion - self._img.linked.api_recurse_init() - - # clean up the image - self._img.hotfix_origin_cleanup() - - # clean up children - self._img.linked.api_recurse_hfo_cleanup(self.__progresstracker) - - def planned_nothingtodo(self, li_ignore_all=False): - """Once an operation has been planned check if there is - something todo. - - Callers should pass all arguments by name assignment and - not by positional order. - - 'li_ignore_all' indicates if we should only report on work - todo in the parent image. (i.e., if an operation was planned - and that operation only involves changes to children, and - li_ignore_all is true, then we'll report that there's nothing - todo.""" + # Handle exceptions that are not + # subclasses of Exception. + exc_type, exc_value, exc_traceback = sys.exc_info() + self.log_operation_end(error=exc_type) + raise + + # After (possibly) creating backup be, determine if + # operation should execute on a clone of current BE. + if self.__new_be: + try: + be.init_image_recovery(self._img, self.__be_name) + except Exception as e: + self.log_operation_end(error=e) + raise + except: + # Handle exceptions that are not + # subclasses of Exception. + exc_type, exc_value, exc_traceback = sys.exc_info() + self.log_operation_end(error=exc_type) + raise + # check if things gained underneath us + if self._img.is_liveroot(): + e = apx.UnableToCopyBE() + self.log_operation_end(error=e) + raise e + + raise_later = None + + # we're about to execute a plan so change our current + # working directory to / so that we won't fail if we + # try to remove our current working directory + os.chdir(os.sep) + + try: + try: + self._img.imageplan.execute() + except apx.WrapIndexingException as e: + raise_later = e - if not self._img.imageplan: - # if theres no plan there nothing to do - return True - if not self._img.imageplan.nothingtodo(): - return False if not self._img.linked.nothingtodo(): - return False - if not li_ignore_all: - assert self.__planned_children - if not self._img.linked.recurse_nothingtodo(): - return False - return True - - def plan_update(self, pkg_list, refresh_catalogs=True, - reject_list=misc.EmptyI, noexecute=False, update_index=True, - be_name=None, new_be=False, repos=None, be_activate=True): - """DEPRECATED. use gen_plan_update().""" - for pd in self.gen_plan_update( - pkgs_update=pkg_list, refresh_catalogs=refresh_catalogs, - reject_list=reject_list, noexecute=noexecute, - update_index=update_index, be_name=be_name, new_be=new_be, - repos=repos, be_activate=be_activate): - continue - return not self.planned_nothingtodo() - - def plan_update_all(self, refresh_catalogs=True, - reject_list=misc.EmptyI, noexecute=False, force=False, - update_index=True, be_name=None, new_be=True, repos=None, - be_activate=True): - """DEPRECATED. 
use gen_plan_update().""" - for pd in self.gen_plan_update( - refresh_catalogs=refresh_catalogs, reject_list=reject_list, - noexecute=noexecute, force=force, - update_index=update_index, be_name=be_name, new_be=new_be, - repos=repos, be_activate=be_activate): - continue - return (not self.planned_nothingtodo(), self.solaris_image()) - - def gen_plan_update(self, pkgs_update=None, act_timeout=0, - backup_be=None, backup_be_name=None, be_activate=True, be_name=None, - force=False, ignore_missing=False, li_ignore=None, - li_parent_sync=True, li_erecurse=None, new_be=True, noexecute=False, - pubcheck=True, refresh_catalogs=True, reject_list=misc.EmptyI, - repos=None, update_index=True): - - """This is a generator function that yields a PlanDescription - object. If parsable_version is set, it also yields dictionaries - containing plan information for child images. - - If pkgs_update is not set, constructs a plan to update all - packages on the system to the latest known versions. Once an - operation has been planned, it may be executed by first - calling prepare(), and then execute_plan(). After execution - of a plan, or to abandon a plan, reset() should be called. - - Callers should pass all arguments by name assignment and - not by positional order. - - If 'pkgs_update' is set, constructs a plan to update the - packages provided in pkgs_update. - - Once an operation has been planned, it may be executed by - first calling prepare(), and then execute_plan(). - - 'force' indicates whether update should skip the package - system up to date check. - - 'ignore_missing' indicates whether update should ignore packages - which are not installed. - - 'pubcheck' indicates that we should skip the child image - publisher check before creating a plan for this image. only - pkg.1 should use this parameter, other callers should never - specify it. - - For all other parameters, refer to the 'gen_plan_install' - function for an explanation of their usage and effects.""" - - if pkgs_update or force: - ipkg_require_latest = False + self._img.linked.syncmd() + except RuntimeError as e: + if self.__new_be: + be.restore_image() else: - ipkg_require_latest = True - - op = API_OP_UPDATE - return self.__plan_op(op, - _act_timeout=act_timeout, _backup_be=backup_be, - _backup_be_name=backup_be_name, _be_activate=be_activate, - _be_name=be_name, _ipkg_require_latest=ipkg_require_latest, - _li_ignore=li_ignore, _li_parent_sync=li_parent_sync, - _li_erecurse=li_erecurse, _new_be=new_be, - _noexecute=noexecute, _pubcheck=pubcheck, - _refresh_catalogs=refresh_catalogs, _repos=repos, - _update_index=update_index, ignore_missing=ignore_missing, - pkgs_update=pkgs_update, reject_list=reject_list, - ) - - def plan_install(self, pkg_list, refresh_catalogs=True, - noexecute=False, update_index=True, be_name=None, - reject_list=misc.EmptyI, new_be=False, repos=None, - be_activate=True): - """DEPRECATED. 
use gen_plan_install().""" - for pd in self.gen_plan_install( - pkgs_inst=pkg_list, refresh_catalogs=refresh_catalogs, - noexecute=noexecute, update_index=update_index, - be_name=be_name, reject_list=reject_list, new_be=new_be, - repos=repos, be_activate=be_activate): - continue - return not self.planned_nothingtodo() - - def gen_plan_install(self, pkgs_inst, act_timeout=0, backup_be=None, - backup_be_name=None, be_activate=True, be_name=None, - li_erecurse=None, li_ignore=None, li_parent_sync=True, new_be=False, - noexecute=False, pubcheck=True, refresh_catalogs=True, - reject_list=misc.EmptyI, repos=None, update_index=True): - """This is a generator function that yields a PlanDescription - object. If parsable_version is set, it also yields dictionaries - containing plan information for child images. - - Constructs a plan to install the packages provided in - pkgs_inst. Once an operation has been planned, it may be - executed by first calling prepare(), and then execute_plan(). - After execution of a plan, or to abandon a plan, reset() - should be called. - - Callers should pass all arguments by name assignment and - not by positional order. - - 'act_timeout' sets the timeout for synchronous actuators in - seconds, -1 is no timeout, 0 is for using asynchronous - actuators. - - 'backup_be' indicates whether a backup boot environment should - be created before the operation is executed. If True, a backup - boot environment will be created. If False, a backup boot - environment will not be created. If None and a new boot - environment is not created, and packages are being updated or - are being installed and tagged with reboot-needed, a backup - boot environment will be created. - - 'backup_be_name' is a string to use as the name of any backup - boot environment created during the operation. - - 'be_activate' is an optional boolean indicating whether any - new boot environment created for the operation should be set - as the active one on next boot if the operation is successful. - - 'be_name' is a string to use as the name of any new boot - environment created during the operation. - - 'li_erecurse' is either None or a list. If it's None (the - default), the planning operation will not explicitly recurse - into linked children to perform the requested operation. If this - is a list of linked image children names, the requested - operation will be performed in each of the specified - children. - - 'li_ignore' is either None or a list. If it's None (the - default), the planning operation will attempt to keep all - linked children in sync. If it's an empty list the planning - operation will ignore all children. If this is a list of - linked image children names, those children will be ignored - during the planning operation. If a child is ignored during - the planning phase it will also be skipped during the - preparation and execution phases. - - 'li_parent_sync' if the current image is a child image, this - flag controls whether the linked image parent metadata will be - automatically refreshed. - - 'new_be' indicates whether a new boot environment should be - created during the operation. If True, a new boot environment - will be created. If False, and a new boot environment is - needed, an ImageUpdateOnLiveImageException will be raised. - If None, a new boot environment will be created only if needed. - - 'noexecute' determines whether the resulting plan can be - executed and whether history will be recorded after - planning is finished. - - 'pkgs_inst' is a list of packages to install. 
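A hedged usage sketch of the plan/prepare/execute cycle these docstrings describe (the remaining parameter descriptions continue below). `api_inst` stands for an already-constructed ImageInterface, and the package name is invented for illustration:

    # Minimal sketch of the generator-driven lifecycle described above.
    # 'api_inst' is an assumed ImageInterface; the FMRI is illustrative.
    for pd in api_inst.gen_plan_install(pkgs_inst=["web/server/nginx"]):
        pass  # parsable child-image dictionaries may also be yielded here

    if not api_inst.planned_nothingtodo():
        api_inst.prepare()       # download packages, prime search indexes
        api_inst.execute_plan()  # uncancelable once it begins
    api_inst.reset()             # abandon/clear the plan either way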
- - 'refresh_catalogs' controls whether the catalogs will - automatically be refreshed. - - 'reject_list' is a list of patterns not to be permitted - in solution; installed packages matching these patterns - are removed. - - 'repos' is a list of URI strings or RepositoryURI objects that - represent the locations of additional sources of package data to - use during the planned operation. All API functions called - while a plan is still active will use this package data. - - 'update_index' determines whether client search indexes - will be updated after operation completion during plan - execution.""" - - # certain parameters must be specified - assert pkgs_inst and type(pkgs_inst) == list - - op = API_OP_INSTALL - return self.__plan_op(op, _act_timeout=act_timeout, - _backup_be=backup_be, _backup_be_name=backup_be_name, - _be_activate=be_activate, _be_name=be_name, - _li_erecurse=li_erecurse, _li_ignore=li_ignore, - _li_parent_sync=li_parent_sync, _new_be=new_be, - _noexecute=noexecute, _pubcheck=pubcheck, - _refresh_catalogs=refresh_catalogs, _repos=repos, - _update_index=update_index, pkgs_inst=pkgs_inst, - reject_list=reject_list, ) - - def gen_plan_exact_install(self, pkgs_inst, backup_be=None, - backup_be_name=None, be_activate=True, be_name=None, li_ignore=None, - li_parent_sync=True, new_be=False, noexecute=False, - refresh_catalogs=True, reject_list=misc.EmptyI, repos=None, - update_index=True): - """This is a generator function that yields a PlanDescription - object. If parsable_version is set, it also yields dictionaries - containing plan information for child images. - - Constructs a plan to install exactly the packages provided in - pkgs_inst. Once an operation has been planned, it may be - executed by first calling prepare(), and then execute_plan(). - After execution of a plan, or to abandon a plan, reset() - should be called. - - Callers should pass all arguments by name assignment and - not by positional order. - - 'pkgs_inst' is a list of packages to install exactly. - - For all other parameters, refer to 'gen_plan_install' - for an explanation of their usage and effects.""" - - # certain parameters must be specified - assert pkgs_inst and type(pkgs_inst) == list - - op = API_OP_EXACT_INSTALL - return self.__plan_op(op, - _backup_be=backup_be, _backup_be_name=backup_be_name, - _be_activate=be_activate, _be_name=be_name, - _li_ignore=li_ignore, _li_parent_sync=li_parent_sync, - _new_be=new_be, _noexecute=noexecute, - _refresh_catalogs=refresh_catalogs, _repos=repos, - _update_index=update_index, pkgs_inst=pkgs_inst, - reject_list=reject_list) - - def gen_plan_sync(self, backup_be=None, backup_be_name=None, - be_activate=True, be_name=None, li_ignore=None, li_md_only=False, - li_parent_sync=True, li_pkg_updates=True, new_be=False, - noexecute=False, pubcheck=True, refresh_catalogs=True, - reject_list=misc.EmptyI, repos=None, update_index=True): - """This is a generator function that yields a PlanDescription - object. If parsable_version is set, it also yields dictionaries - containing plan information for child images. - - Constructs a plan to sync the current image with its - linked image constraints. Once an operation has been planned, - it may be executed by first calling prepare(), and then - execute_plan(). After execution of a plan, or to abandon a - plan, reset() should be called. - - Callers should pass all arguments by name assignment and - not by positional order. 
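For contrast with the regular install planner, a sketch of an exact-install dry run under the same lifecycle; `api_inst`, the package name, and the printed message are assumptions, and describe() is defined later in this module:

    # Sketch: plan an exact-install dry run (the image would end up with
    # exactly the named packages), inspect the result, then abandon it.
    for pd in api_inst.gen_plan_exact_install(
        pkgs_inst=["minimal/core"], noexecute=True
    ):
        pass

    plan = api_inst.describe()  # PlanDescription, or None if nothing planned
    if plan is not None and not api_inst.planned_nothingtodo():
        print("exact-install would modify the image")
    api_inst.reset()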
- - 'li_md_only' don't actually modify any packages in the current - images, only sync the linked image metadata from the parent - image. If this options is True, 'li_parent_sync' must also be - True. - - 'li_pkg_updates' when planning a sync operation, allow updates - to packages other than the constraints package. If this - option is False, planning a sync will fail if any packages - (other than the constraints package) need updating to bring - the image in sync with its parent. - - For all other parameters, refer to 'gen_plan_install' and - 'gen_plan_update' for an explanation of their usage and - effects.""" - - # we should only be invoked on a child image. - if not self.ischild(): - raise apx.LinkedImageException( - self_not_child=self._img_path) - - op = API_OP_SYNC - return self.__plan_op(op, - _backup_be=backup_be, _backup_be_name=backup_be_name, - _be_activate=be_activate, _be_name=be_name, - _li_ignore=li_ignore, _li_md_only=li_md_only, - _li_parent_sync=li_parent_sync, _new_be=new_be, - _noexecute=noexecute, _pubcheck=pubcheck, - _refresh_catalogs=refresh_catalogs, - _repos=repos, - _update_index=update_index, - li_pkg_updates=li_pkg_updates, reject_list=reject_list) - - def gen_plan_attach(self, lin, li_path, allow_relink=False, - backup_be=None, backup_be_name=None, be_activate=True, be_name=None, - force=False, li_ignore=None, li_md_only=False, li_pkg_updates=True, - li_props=None, new_be=False, noexecute=False, refresh_catalogs=True, - reject_list=misc.EmptyI, repos=None, update_index=True): - """This is a generator function that yields a PlanDescription - object. If parsable_version is set, it also yields dictionaries - containing plan information for child images. - - Attach a parent image and sync the packages in the current - image with the new parent. Once an operation has been - planned, it may be executed by first calling prepare(), and - then execute_plan(). After execution of a plan, or to abandon - a plan, reset() should be called. - - Callers should pass all arguments by name assignment and - not by positional order. - - 'lin' a LinkedImageName object that is a name for the current - image. - - 'li_path' a path to the parent image. - - 'allow_relink' allows re-linking of an image that is already a - linked image child. If this option is True we'll overwrite - all existing linked image metadata. - - 'li_props' optional linked image properties to apply to the - child image. - - For all other parameters, refer to the 'gen_plan_install' and - 'gen_plan_sync' functions for an explanation of their usage - and effects.""" - - if li_props is None: - li_props = dict() - - op = API_OP_ATTACH - ad_kwargs = { - "allow_relink": allow_relink, - "force": force, - "lin": lin, - "path": li_path, - "props": li_props, - } - return self.__plan_op(op, - _backup_be=backup_be, _backup_be_name=backup_be_name, - _be_activate=be_activate, _be_name=be_name, - _li_ignore=li_ignore, _li_md_only=li_md_only, - _new_be=new_be, _noexecute=noexecute, - _refresh_catalogs=refresh_catalogs, _repos=repos, - _update_index=update_index, _ad_kwargs=ad_kwargs, - li_pkg_updates=li_pkg_updates, reject_list=reject_list) - - def gen_plan_detach(self, backup_be=None, - backup_be_name=None, be_activate=True, be_name=None, force=False, - li_ignore=None, li_md_only=False, li_pkg_updates=True, new_be=False, - noexecute=False): - """This is a generator function that yields a PlanDescription - object. If parsable_version is set, it also yields dictionaries - containing plan information for child images. 
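A hedged sketch of attaching the current image to a parent, per the gen_plan_attach parameters above; the child name "zone:testzone" and the parent path are hypothetical, and parse_linked_name() is defined later in this module:

    # Sketch: attach this image as a child of a parent image and sync to it.
    lin = api_inst.parse_linked_name("zone:testzone", allow_unknown=True)
    for pd in api_inst.gen_plan_attach(lin, "/path/to/parent", li_props={}):
        pass
    if not api_inst.planned_nothingtodo():
        api_inst.prepare()
        api_inst.execute_plan()
    api_inst.reset()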
- - Detach from a parent image and remove any constraints - package from this image. Once an operation has been planned, - it may be executed by first calling prepare(), and then - execute_plan(). After execution of a plan, or to abandon a - plan, reset() should be called. - - Callers should pass all arguments by name assignment and - not by positional order. - - For all other parameters, refer to the 'gen_plan_install' and - 'gen_plan_sync' functions for an explanation of their usage - and effects.""" - - op = API_OP_DETACH - ad_kwargs = { - "force": force - } - return self.__plan_op(op, _ad_kwargs=ad_kwargs, - _backup_be=backup_be, _backup_be_name=backup_be_name, - _be_activate=be_activate, _be_name=be_name, - _li_ignore=li_ignore, _li_md_only=li_md_only, - _new_be=new_be, _noexecute=noexecute, - _refresh_catalogs=False, _update_index=False, - li_pkg_updates=li_pkg_updates) - - def plan_uninstall(self, pkg_list, noexecute=False, update_index=True, - be_name=None, new_be=False, be_activate=True): - """DEPRECATED. use gen_plan_uninstall().""" - for pd in self.gen_plan_uninstall(pkgs_to_uninstall=pkg_list, - noexecute=noexecute, update_index=update_index, - be_name=be_name, new_be=new_be, be_activate=be_activate): - continue - return not self.planned_nothingtodo() - - def gen_plan_uninstall(self, pkgs_to_uninstall, act_timeout=0, - backup_be=None, backup_be_name=None, be_activate=True, - be_name=None, ignore_missing=False, li_ignore=None, - li_parent_sync=True, li_erecurse=None, new_be=False, noexecute=False, - pubcheck=True, update_index=True): - """This is a generator function that yields a PlanDescription - object. If parsable_version is set, it also yields dictionaries - containing plan information for child images. - - Constructs a plan to remove the packages provided in - pkgs_to_uninstall. Once an operation has been planned, it may - be executed by first calling prepare(), and then - execute_plan(). After execution of a plan, or to abandon a - plan, reset() should be called. - - Callers should pass all arguments by name assignment and - not by positional order. - - 'ignore_missing' indicates whether uninstall should ignore - packages which are not installed. - - 'pkgs_to_uninstall' is a list of packages to uninstall. - - For all other parameters, refer to the 'gen_plan_install' - function for an explanation of their usage and effects.""" - - # certain parameters must be specified - assert pkgs_to_uninstall and type(pkgs_to_uninstall) == list - - op = API_OP_UNINSTALL - return self.__plan_op(op, _act_timeout=act_timeout, - _backup_be=backup_be, _backup_be_name=backup_be_name, - _be_activate=be_activate, _be_name=be_name, - _li_erecurse=li_erecurse, _li_ignore=li_ignore, - _li_parent_sync=li_parent_sync, _new_be=new_be, - _noexecute=noexecute, _pubcheck=pubcheck, - _refresh_catalogs=False, _update_index=update_index, - ignore_missing=ignore_missing, - pkgs_to_uninstall=pkgs_to_uninstall) - - def gen_plan_set_mediators(self, mediators, backup_be=None, - backup_be_name=None, be_activate=True, be_name=None, li_ignore=None, - li_parent_sync=True, new_be=None, noexecute=False, - update_index=True): - """This is a generator function that yields a PlanDescription - object. If parsable_version is set, it also yields dictionaries - containing plan information for child images. - - Creates a plan to change the version and implementation values - for mediators as specified in the provided dictionary. 
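A minimal sketch of a mediator change request, following the dictionary shape documented immediately below; the mediator names and version are purely illustrative:

    # Sketch: prefer version 3.11 for a hypothetical "python" mediator and
    # revert a "vi" mediator to its default mediation (empty dict).
    mediators = {
        "python": {"version": "3.11"},
        "vi": {},
    }
    for pd in api_inst.gen_plan_set_mediators(mediators):
        pass
    if not api_inst.planned_nothingtodo():
        api_inst.prepare()
        api_inst.execute_plan()
    api_inst.reset()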
Once an - operation has been planned, it may be executed by first calling - prepare(), and then execute_plan(). After execution of a plan, - or to abandon a plan, reset() should be called. - - Callers should pass all arguments by name assignment and not by - positional order. - - 'mediators' is a dict of dicts of the mediators to set version - and implementation for. If the dict for a given mediator-name - is empty, it will be interpreted as a request to revert the - specified mediator to the default, "optimal" mediation. It - should be of the form: - - { - mediator-name: { - "implementation": mediator-implementation-string, - "version": mediator-version-string - } - } - - 'implementation' is an optional string that specifies the - implementation of the mediator for use in addition to or - instead of 'version'. - - 'version' is an optional string that specifies the version - (expressed as a dot-separated sequence of non-negative - integers) of the mediator for use. - - For all other parameters, refer to the 'gen_plan_install' - function for an explanation of their usage and effects.""" - - assert mediators - return self.__plan_op(API_OP_SET_MEDIATOR, - _backup_be=backup_be, _backup_be_name=backup_be_name, - _be_activate=be_activate, _be_name=be_name, - _li_ignore=li_ignore, _li_parent_sync=li_parent_sync, - mediators=mediators, _new_be=new_be, _noexecute=noexecute, - _refresh_catalogs=False, _update_index=update_index) - - def plan_change_varcets(self, variants=None, facets=None, - noexecute=False, be_name=None, new_be=None, repos=None, - be_activate=True): - """DEPRECATED. use gen_plan_change_varcets().""" - for pd in self.gen_plan_change_varcets( - variants=variants, facets=facets, noexecute=noexecute, - be_name=be_name, new_be=new_be, repos=repos, - be_activate=be_activate): - continue - return not self.planned_nothingtodo() - - def gen_plan_change_varcets(self, facets=None, variants=None, - act_timeout=0, backup_be=None, backup_be_name=None, - be_activate=True, be_name=None, li_erecurse=None, li_ignore=None, - li_parent_sync=True, new_be=None, noexecute=False, pubcheck=True, - refresh_catalogs=True, reject_list=misc.EmptyI, repos=None, - update_index=True): - """This is a generator function that yields a PlanDescription - object. If parsable_version is set, it also yields dictionaries - containing plan information for child images. - - Creates a plan to change the specified variants and/or - facets for the image. Once an operation has been planned, it - may be executed by first calling prepare(), and then - execute_plan(). After execution of a plan, or to abandon a - plan, reset() should be called. - - Callers should pass all arguments by name assignment and - not by positional order. - - 'facets' is a dict of the facets to change the values of. - - 'variants' is a dict of the variants to change the values of. - - For all other parameters, refer to the 'gen_plan_install' - function for an explanation of their usage and effects.""" - - # An empty facets dictionary is allowed because that's how to - # unset all set facets. - if not variants and facets is None: - raise ValueError("Nothing to do") - - invalid_names = [] - if variants: - op = API_OP_CHANGE_VARIANT - # Check whether '*' or '?' is in the input. Currently, - # change-variant does not accept globbing. Also check - # for whitespaces. - for variant in variants: - if "*" in variant or "?" 
in variant: - raise apx.UnsupportedVariantGlobbing() - if not misc.valid_varcet_name(variant): - invalid_names.append(variant) + be.restore_install_uninstall() + # Must be done after bootenv restore. + self.log_operation_end(error=e) + raise + except search_errors.IndexLockedException as e: + error = apx.IndexLockedException(e) + self.log_operation_end(error=error) + raise error + except search_errors.ProblematicPermissionsIndexException as e: + error = apx.ProblematicPermissionsIndexException(e) + self.log_operation_end(error=error) + raise error + except search_errors.InconsistentIndexException as e: + error = apx.CorruptedIndexException(e) + self.log_operation_end(error=error) + raise error + except NonzeroExitException as e: + # Won't happen during update + be.restore_install_uninstall() + error = apx.ActuatorException(e) + self.log_operation_end(error=error) + raise error + except apx.InvalidMediatorTarget as e: + # Mount a new BE but do not activate it in case the + # missing mediator target will cause a broken system. + # Allows the admin to take the appropriate action. + if self.__new_be: + be.restore_image() + self.log_operation_end(error=e) + raise e + except Exception as e: + if self.__new_be: + be.restore_image() else: - op = API_OP_CHANGE_FACET - for facet in facets: - # Explict check for not None so that we can fix - # a broken system from the past by clearing - # the facet. Neither True of False should be - # allowed for this special facet. - if facet == "facet.version-lock.*" and \ - facets[facet] is not None: - raise apx.UnsupportedFacetChange(facet, - facets[facet]) - if not misc.valid_varcet_name(facet): - invalid_names.append(facet) - if invalid_names: - raise apx.InvalidVarcetNames(invalid_names) - - return self.__plan_op(op, _act_timeout=act_timeout, - _backup_be=backup_be, _backup_be_name=backup_be_name, - _be_activate=be_activate, _be_name=be_name, - _li_erecurse=li_erecurse, _li_ignore=li_ignore, - _li_parent_sync=li_parent_sync, _new_be=new_be, - _noexecute=noexecute, _pubcheck=pubcheck, - _refresh_catalogs=refresh_catalogs, _repos=repos, - _update_index=update_index, facets=facets, - variants=variants, reject_list=reject_list) - - def plan_revert(self, args, tagged=False, noexecute=True, be_name=None, - new_be=None, be_activate=True): - """DEPRECATED. use gen_plan_revert().""" - for pd in self.gen_plan_revert( - args=args, tagged=tagged, noexecute=noexecute, - be_name=be_name, new_be=new_be, be_activate=be_activate): - continue - return not self.planned_nothingtodo() - - def gen_plan_revert(self, args, backup_be=None, backup_be_name=None, - be_activate=True, be_name=None, new_be=None, noexecute=True, - tagged=False): - """This is a generator function that yields a PlanDescription - object. If parsable_version is set, it also yields dictionaries - containing plan information for child images. - - Plan to revert either files or all files tagged with - specified values. Args contains either path names or tag - names to be reverted, tagged is True if args contains tags. - Once an operation has been planned, it may be executed by - first calling prepare(), and then execute_plan(). After - execution of a plan, or to abandon a plan, reset() should be - called. 
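A hedged sketch of a revert plan as just described; the path is illustrative, and passing tagged=True with tag names in 'args' would revert by revert-tag instead:

    # Sketch: plan and execute a revert of one packaged file by path.
    for pd in api_inst.gen_plan_revert(args=["/etc/motd"], noexecute=False):
        pass
    if not api_inst.planned_nothingtodo():
        api_inst.prepare()
        api_inst.execute_plan()
    api_inst.reset()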
- - For all other parameters, refer to the 'gen_plan_install' - function for an explanation of their usage and effects.""" - - op = API_OP_REVERT - return self.__plan_op(op, _be_activate=be_activate, - _backup_be=backup_be, _backup_be_name=backup_be_name, - _be_name=be_name, _li_ignore=[], _new_be=new_be, - _noexecute=noexecute, _refresh_catalogs=False, - _update_index=False, args=args, tagged=tagged) - - def gen_plan_dehydrate(self, publishers=None, noexecute=True): - """This is a generator function that yields a PlanDescription - object. - - Plan to remove non-editable files and hardlinks from an image. - Once an operation has been planned, it may be executed by - first calling prepare(), and then execute_plan(). After - execution of a plan, or to abandon a plan, reset() should be - called. - - 'publishers' is a list of publishers to dehydrate. - - For all other parameters, refer to the 'gen_plan_install' - function for an explanation of their usage and effects.""" - - op = API_OP_DEHYDRATE - return self.__plan_op(op, _noexecute=noexecute, - _refresh_catalogs=False, _update_index=False, - publishers=publishers) - - def gen_plan_rehydrate(self, publishers=None, noexecute=True): - """This is a generator function that yields a PlanDescription - object. - - Plan to reinstall non-editable files and hardlinks to a dehydrated - image. Once an operation has been planned, it may be executed by - first calling prepare(), and then execute_plan(). After - execution of a plan, or to abandon a plan, reset() should be - called. - - 'publishers' is a list of publishers to dehydrate on. - - For all other parameters, refer to the 'gen_plan_install' - function for an explanation of their usage and effects.""" - - op = API_OP_REHYDRATE - return self.__plan_op(op, _noexecute=noexecute, - _refresh_catalogs=False, _update_index=False, - publishers=publishers) - - def gen_plan_verify(self, args, noexecute=True, unpackaged=False, - unpackaged_only=False, verify_paths=misc.EmptyI): - """This is a generator function that yields a PlanDescription - object. - - Plan to repair anything that fails to verify. Once an operation - has been planned, it may be executed by first calling prepare(), - and then execute_plan(). After execution of a plan, or to - abandon a plan, reset() should be called. - - For parameters, refer to the 'gen_plan_install' - function for an explanation of their usage and effects.""" - - op = API_OP_VERIFY - return self.__plan_op(op, args=args, _noexecute=noexecute, - _refresh_catalogs=False, _update_index=False, _new_be=None, - unpackaged=unpackaged, unpackaged_only=unpackaged_only, - verify_paths=verify_paths) - - def gen_plan_fix(self, args, backup_be=None, backup_be_name=None, - be_activate=True, be_name=None, new_be=None, noexecute=True, - unpackaged=False): - """This is a generator function that yields a PlanDescription - object. - - Plan to repair anything that fails to verify. Once an operation - has been planned, it may be executed by first calling prepare(), - and then execute_plan(). After execution of a plan, or to - abandon a plan, reset() should be called. 
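A sketch of the verify/fix pairing described above: a read-only verify pass first, then a planned repair of the same package. The FMRI pattern and `api_inst` are assumptions:

    # Sketch: verify one package without changing anything, then fix it.
    for pd in api_inst.gen_plan_verify(["system/zones"], noexecute=True):
        pass
    api_inst.reset()

    for pd in api_inst.gen_plan_fix(["system/zones"], noexecute=False):
        pass
    if not api_inst.planned_nothingtodo():
        api_inst.prepare()
        api_inst.execute_plan()
    api_inst.reset()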
- - For parameters, refer to the 'gen_plan_install' - function for an explanation of their usage and effects.""" - - op = API_OP_FIX - return self.__plan_op(op, args=args, _be_activate=be_activate, - _backup_be=backup_be, _backup_be_name=backup_be_name, - _be_name=be_name, _new_be=new_be, _noexecute=noexecute, - _refresh_catalogs=False, _update_index=False, - unpackaged=unpackaged) - - def attach_linked_child(self, lin, li_path, li_props=None, - accept=False, allow_relink=False, force=False, li_md_only=False, - li_pkg_updates=True, noexecute=False, - refresh_catalogs=True, reject_list=misc.EmptyI, - show_licenses=False, update_index=True): - """Attach an image as a child to the current image (the - current image will become a parent image. This operation - results in attempting to sync the child image with the parent - image. - - 'lin' is the name of the child image - - 'li_path' is the path to the child image - - 'li_props' optional linked image properties to apply to the - child image. - - 'allow_relink' indicates whether we should allow linking of a - child image that is already linked (the child may already - be a child or a parent image). - - 'force' indicates whether we should allow linking of a child - image even if the specified linked image type doesn't support - attaching of children. - - 'li_md_only' indicates whether we should only update linked - image metadata and not actually try to sync the child image. - - 'li_pkg_updates' indicates whether we should disallow pkg - updates during the child image sync. - - 'noexecute' indicates if we should actually make any changes - rather or just simulate the operation. - - 'refresh_catalogs' controls whether the catalogs will - automatically be refreshed. - - 'reject_list' is a list of patterns not to be permitted - in solution; installed packages matching these patterns - are removed. - - 'update_index' determines whether client search indexes will - be updated in the child after the sync operation completes. - - This function returns a tuple of the format (rv, err) where rv - is a pkg.client.pkgdefs return value and if an error was - encountered err is an exception object which describes the - error.""" - - return self._img.linked.attach_child(lin, li_path, li_props, - accept=accept, allow_relink=allow_relink, force=force, - li_md_only=li_md_only, li_pkg_updates=li_pkg_updates, - noexecute=noexecute, - progtrack=self.__progresstracker, - refresh_catalogs=refresh_catalogs, reject_list=reject_list, - show_licenses=show_licenses, update_index=update_index) - - def detach_linked_children(self, li_list, force=False, - li_md_only=False, li_pkg_updates=True, noexecute=False): - """Detach one or more children from the current image. This - operation results in the removal of any constraint package - from the child images. - - 'li_list' a list of linked image name objects which specified - which children to operate on. If the list is empty then we - operate on all children. - - For all other parameters, refer to the 'attach_linked_child' - function for an explanation of their usage and effects. - - This function returns a dictionary where the keys are linked - image name objects and the values are the result of the - specified operation on the associated child image. 
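A minimal sketch of detaching linked children and merging the per-child results with the convenience helper defined just below; the exact (rv, err) tuple shape is described in the continuation of this docstring:

    # Sketch: detach every linked child (an empty list means "all children")
    # and collapse the per-child results into a single merged return value.
    rvdict = api_inst.detach_linked_children([], noexecute=False)
    merged = api_inst.detach_linked_rvdict2rv(rvdict)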
The result - is a tuple of the format (rv, err) where rv is a - pkg.client.pkgdefs return value and if an error was - encountered err is an exception object which describes the - error.""" - - return self._img.linked.detach_children(li_list, - force=force, li_md_only=li_md_only, - li_pkg_updates=li_pkg_updates, - noexecute=noexecute) - - def detach_linked_rvdict2rv(self, rvdict): - """Convenience function that takes a dictionary returned from - an operations on multiple children and merges the results into - a single return code.""" - - return self._img.linked.detach_rvdict2rv(rvdict) - - def sync_linked_children(self, li_list, - accept=False, li_md_only=False, - li_pkg_updates=True, noexecute=False, - refresh_catalogs=True, show_licenses=False, update_index=True): - """Sync one or more children of the current image. - - For all other parameters, refer to the 'attach_linked_child' - and 'detach_linked_children' functions for an explanation of - their usage and effects. - - For a description of the return value, refer to the - 'detach_linked_children' function.""" - - rvdict = self._img.linked.sync_children(li_list, - accept=accept, li_md_only=li_md_only, - li_pkg_updates=li_pkg_updates, noexecute=noexecute, - progtrack=self.__progresstracker, - refresh_catalogs=refresh_catalogs, - show_licenses=show_licenses, update_index=update_index) - return rvdict - - def sync_linked_rvdict2rv(self, rvdict): - """Convenience function that takes a dictionary returned from - an operations on multiple children and merges the results into - a single return code.""" - - return self._img.linked.sync_rvdict2rv(rvdict) - - def audit_linked_children(self, li_list): - """Audit one or more children of the current image to see if - they are in sync with this image. - - For all parameters, refer to the 'detach_linked_children' - functions for an explanation of their usage and effects. - - For a description of the return value, refer to the - 'detach_linked_children' function.""" - - rvdict = self._img.linked.audit_children(li_list) - return rvdict - - def audit_linked_rvdict2rv(self, rvdict): - """Convenience function that takes a dictionary returned from - an operations on multiple children and merges the results into - a single return code.""" - - return self._img.linked.audit_rvdict2rv(rvdict) - - def audit_linked(self, li_parent_sync=True): - """If the current image is a child image, this function - audits the current image to see if it's in sync with it's - parent. - - For a description of the return value, refer to the - 'detach_linked_children' function.""" - - lin = self._img.linked.child_name - rvdict = {} - - if li_parent_sync: - # refresh linked image data from parent image. - rvdict[lin] = self._img.linked.syncmd_from_parent( - catch_exception=True) - if rvdict[lin] is not None: - return rvdict - - rvdict[lin] = self._img.linked.audit_self() - return rvdict - - def ischild(self): - """Indicates whether the current image is a child image.""" - return self._img.linked.ischild() - - def isparent(self, li_ignore=None): - """Indicates whether the current image is a parent image.""" - return self._img.linked.isparent(li_ignore) - - @staticmethod - def __utc_format(time_str, utc_now): - """Given a local time value string, formatted with - "%Y-%m-%dT%H:%M:%S, return a UTC representation of that value, - formatted with %Y%m%dT%H%M%SZ. This raises a ValueError if the - time was incorrectly formatted. 
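A standalone, runnable sketch of the local-time to UTC conversion this helper performs (the literal "now" special case is noted just below); the sample timestamp is arbitrary:

    import datetime
    import time

    local_dt = datetime.datetime.strptime(
        "2023-06-01T12:00:00", "%Y-%m-%dT%H:%M:%S"
    )
    secs = time.mktime(local_dt.timetuple())
    utc_dt = datetime.datetime.utcfromtimestamp(secs)
    # UTC form of the local timestamp; the exact value depends on the
    # local timezone in effect when mktime() is called.
    print(utc_dt.strftime("%Y%m%dT%H%M%SZ"))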
If the time_str is "now", it - returns the value of utc_now""" - - if time_str == "now": - return utc_now + be.restore_install_uninstall() + # Must be done after bootenv restore. + self.log_operation_end(error=e) + raise + except: + # Handle exceptions that are not subclasses of + # Exception. + exc_type, exc_value, exc_traceback = sys.exc_info() - try: - local_dt = datetime.datetime.strptime(time_str, - "%Y-%m-%dT%H:%M:%S") - secs = time.mktime(local_dt.timetuple()) - utc_dt = datetime.datetime.utcfromtimestamp(secs) - return utc_dt.strftime("%Y%m%dT%H%M%SZ") - except ValueError as e: - raise apx.HistoryRequestException(e) - - def __get_history_paths(self, time_val, utc_now): - """Given a local timestamp, either as a discrete value, or a - range of values, formatted as '-', and a - path to find history xml files, return an array of paths that - match that timestamp. utc_now is the current time expressed in - UTC""" - - files = [] - if len(time_val) > 20 or time_val.startswith("now-"): - if time_val.startswith("now-"): - start = utc_now - finish = self.__utc_format(time_val[4:], - utc_now) - else: - # our ranges are 19 chars of timestamp, a '-', - # and another timestamp - start = self.__utc_format(time_val[:19], - utc_now) - finish = self.__utc_format(time_val[20:], - utc_now) - if start > finish: - raise apx.HistoryRequestException(_("Start " - "time must be older than finish time: " - "{0}").format(time_val)) - files = self.__get_history_range(start, finish) + if self.__new_be: + be.restore_image() else: - # there can be multiple event files per timestamp - prefix = self.__utc_format(time_val, utc_now) - files = glob.glob(os.path.join(self._img.history.path, - "{0}*".format(prefix))) - if not files: - raise apx.HistoryRequestException(_("No history " - "entries found for {0}").format(time_val)) - return files - - def __get_history_range(self, start, finish): - """Given a start and finish date, formatted as UTC date strings - as per __utc_format(), return a list of history filenames that - fall within that date range. A range of two equal dates is - the equivalent of just retrieving history for that single date - string.""" - - entries = [] - all_entries = sorted(os.listdir(self._img.history.path)) - - for entry in all_entries: - # our timestamps are always 16 character datestamps - basename = os.path.basename(entry)[:16] - if basename >= start: - if basename > finish: - # we can stop looking now. - break - entries.append(entry) - return entries - - def gen_history(self, limit=None, times=misc.EmptyI): - """A generator function that returns History objects up to the - limit specified matching the times specified. - - 'limit' is an optional integer value specifying the maximum - number of entries to return. - - 'times' is a list of timestamp or timestamp range strings to - restrict the returned entries to.""" - - # Make entries a set to cope with multiple overlapping ranges or - # times. - entries = set() - - utc_now = datetime.datetime.utcnow().strftime("%Y%m%dT%H%M%SZ") - for time_val in times: - # Ranges are 19 chars of timestamp, a '-', and - # another timestamp. 
- if len(time_val) > 20 or time_val.startswith("now-"): - if time_val.startswith("now-"): - start = utc_now - finish = self.__utc_format(time_val[4:], - utc_now) - else: - start = self.__utc_format(time_val[:19], - utc_now) - finish = self.__utc_format( - time_val[20:], utc_now) - if start > finish: - raise apx.HistoryRequestException( - _("Start time must be older than " - "finish time: {0}").format( - time_val)) - files = self.__get_history_range(start, finish) - else: - # There can be multiple entries per timestamp. - prefix = self.__utc_format(time_val, utc_now) - files = glob.glob(os.path.join( - self._img.history.path, "{0}*".format( - prefix))) - - try: - files = self.__get_history_paths(time_val, - utc_now) - entries.update(files) - except ValueError: - raise apx.HistoryRequestException(_("Invalid " - "time format '{0}'. Please use " - "%Y-%m-%dT%H:%M:%S or\n" - "%Y-%m-%dT%H:%M:%S-" - "%Y-%m-%dT%H:%M:%S").format(time_val)) - - if not times: - try: - entries = os.listdir(self._img.history.path) - except EnvironmentError as e: - if e.errno == errno.ENOENT: - # No history to list. - return - raise apx._convert_error(e) - - entries = sorted(entries) - if limit: - limit *= -1 - entries = entries[limit:] - - try: - uuid_be_dic = bootenv.BootEnv.get_uuid_be_dic() - except apx.ApiException as e: - uuid_be_dic = {} - - for entry in entries: - # Yield each history entry object as it is loaded. - try: - yield history.History( - root_dir=self._img.history.root_dir, - filename=entry, uuid_be_dic=uuid_be_dic) - except apx.HistoryLoadException as e: - if e.parse_failure: - # Ignore corrupt entries. - continue - raise - - def get_linked_name(self): - """If the current image is a child image, this function - returns a linked image name object which represents the name - of the current image.""" - return self._img.linked.child_name + be.restore_install_uninstall() + # Must be done after bootenv restore. + self.log_operation_end(error=exc_type) + raise - def get_linked_props(self, lin=None): - """Return a dictionary which represents the linked image - properties associated with a linked image. + self._img.linked.api_recurse_execute(self.__progresstracker) - 'lin' is the name of the child image. If lin is None then - the current image is assumed to be a linked image and it's - properties are returned.""" + self.__finished_execution(be) + if raise_later: + raise raise_later - return self._img.linked.child_props(lin=lin) + finally: + self._img.cleanup_downloads() + if self._img.locked: + self._img.unlock() + self._activity_lock.release() + + def __finished_execution(self, be): + if self._img.imageplan.state != plandesc.EXECUTED_OK: + if self.__new_be: + be.restore_image() + else: + be.restore_install_uninstall() + + error = apx.ImageplanStateException(self._img.imageplan.state) + # Must be done after bootenv restore. + self.log_operation_end(error=error) + raise error + + if self._img.imageplan.boot_archive_needed() or self.__new_be: + be.update_boot_archive() + + self._img.hotfix_origin_cleanup() + + if self.__new_be: + be.activate_image(set_active=self.__be_activate) + else: + be.activate_install_uninstall() + self._img.cleanup_cached_content(progtrack=self.__progresstracker) + # If the end of the operation wasn't already logged + # by one of the previous operations, then log it as + # ending now. 
+ if self._img.history.operation_name: + self.log_operation_end( + release_notes=self._img.imageplan.pd.release_notes_name + ) + self.__executed = True + + def set_plan_license_status( + self, pfmri, plicense, accepted=None, displayed=None + ): + """Sets the license status for the given package FMRI and + license entry. + + 'accepted' is an optional parameter that can be one of three + values: + None leaves accepted status unchanged + False sets accepted status to False + True sets accepted status to True + + 'displayed' is an optional parameter that can be one of three + values: + None leaves displayed status unchanged + False sets displayed status to False + True sets displayed status to True""" + + self._acquire_activity_lock() + try: + try: + self._disable_cancel() + except apx.CanceledException: + self._cancel_done() + raise - def list_linked(self, li_ignore=None): - """Returns a list of linked images associated with the - current image. This includes both child and parent images. + if not self._img.imageplan: + raise apx.PlanMissingException() - For all parameters, refer to the 'gen_plan_install' function - for an explanation of their usage and effects. + for pp in self.__plan_desc.pkg_plans: + if pp.destination_fmri == pfmri: + pp.set_license_status( + plicense, accepted=accepted, displayed=displayed + ) + break + finally: + self._activity_lock.release() + + def refresh( + self, + full_refresh=False, + pubs=None, + immediate=False, + ignore_unreachable=True, + ): + """Refreshes the metadata (e.g. catalog) for one or more + publishers. + + 'full_refresh' is an optional boolean value indicating whether + a full retrieval of publisher metadata (e.g. catalogs) or only + an update to the existing metadata should be performed. When + True, 'immediate' is also set to True. + + 'pubs' is a list of publisher prefixes or publisher objects + to refresh. Passing an empty list or using the default value + implies all publishers. + + 'immediate' is an optional boolean value indicating whether + a refresh should occur now. If False, a publisher's selected + repository will only be checked for updates if the update + interval period recorded in the image configuration has been + exceeded. + + 'ignore_unreachable' is an optional boolean value indicating + whether unreachable repositories should be ignored. If True, + errors contacting this repository are stored in the transport + but no exception is raised, allowing an operation to continue + if an unneeded repository is not online. + + Currently returns an image object, allowing existing code to + work while the rest of the API is put into place.""" + + self._acquire_activity_lock() + try: + self._disable_cancel() + self._img.lock() + try: + self.__refresh( + full_refresh=full_refresh, + pubs=pubs, + ignore_unreachable=ignore_unreachable, + immediate=immediate, + ) + return self._img + finally: + self._img.unlock() + self._img.cleanup_downloads() + except apx.CanceledException: + self._cancel_done() + raise + finally: + try: + if int(os.environ.get("PKG_DUMP_STATS", 0)) > 0: + self._img.transport.stats.dump() + except ValueError: + # Don't generate stats if an invalid value + # is supplied. 
+ pass + self._activity_lock.release() + + def __refresh( + self, + full_refresh=False, + pubs=None, + immediate=False, + ignore_unreachable=True, + ): + """Private refresh method; caller responsible for locking and + cleanup.""" + + self._img.refresh_publishers( + full_refresh=full_refresh, + ignore_unreachable=ignore_unreachable, + immediate=immediate, + pubs=pubs, + progtrack=self.__progresstracker, + ) + + def __licenses(self, pfmri, mfst, alt_pub=None): + """Private function. Returns the license info from the + manifest mfst.""" + license_lst = [] + for lic in mfst.gen_actions_by_type("license"): + license_lst.append( + LicenseInfo(pfmri, lic, img=self._img, alt_pub=alt_pub) + ) + return license_lst + + @_LockedCancelable() + def get_pkg_categories(self, installed=False, pubs=misc.EmptyI, repos=None): + """Returns an ordered list of tuples of the form (scheme, + category) containing the names of all categories in use by + the last version of each unique package in the catalog on a + per-publisher basis. + + 'installed' is an optional boolean value indicating whether + only the categories used by currently installed packages + should be returned. If False, the categories used by the + latest vesion of every known package will be returned + instead. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to. + + 'repos' is a list of URI strings or RepositoryURI objects that + represent the locations of package repositories to list packages + for. + """ - The returned value is a list of tuples where each tuple - contains (
<li name>, <relationship>, <li path>

  • ).""" + if installed: + excludes = misc.EmptyI + else: + excludes = self._img.list_excludes() + + if repos: + ignored, ignored, known_cat, inst_cat = self.__get_alt_pkg_data( + repos + ) + if installed: + pkg_cat = inst_cat + else: + pkg_cat = known_cat + elif installed: + pkg_cat = self._img.get_catalog(self._img.IMG_CATALOG_INSTALLED) + else: + pkg_cat = self._img.get_catalog(self._img.IMG_CATALOG_KNOWN) + return sorted(pkg_cat.categories(excludes=excludes, pubs=pubs)) + + def __map_installed_newest(self, pubs, known_cat=None): + """Private function. Maps incorporations and publisher + relationships for installed packages and returns them + as a tuple of (pub_ranks, inc_stems, inc_vers, inst_stems, + ren_stems, ren_inst_stems). + """ - return self._img.linked.list_related(li_ignore=li_ignore) + img_cat = self._img.get_catalog(self._img.IMG_CATALOG_INSTALLED) + cat_info = frozenset([img_cat.DEPENDENCY]) - def parse_linked_name(self, li_name, allow_unknown=False): - """Given a string representing a linked image child name, - returns linked image name object representing the same name. + inst_stems = {} + ren_inst_stems = {} + ren_stems = {} - 'allow_unknown' indicates whether the name must represent - actual children or simply be syntactically correct.""" + inc_stems = {} + inc_vers = {} - return self._img.linked.parse_name(li_name, allow_unknown) + pub_ranks = self._img.get_publisher_ranks() - def parse_linked_name_list(self, li_name_list, allow_unknown=False): - """Given a list of strings representing linked image child - names, returns a list of linked image name objects - representing the same names. + # The incorporation list should include all installed, + # incorporated packages from all publishers. + for t in img_cat.entry_actions(cat_info): + (pub, stem, ver), entry, actions = t - For all other parameters, refer to the 'parse_linked_name' - function for an explanation of their usage and effects.""" + inst_stems[stem] = ver + pkgr = False + targets = set() + try: + for a in actions: + if a.name == "set" and a.attrs["name"] == "pkg.renamed": + pkgr = True + continue + elif a.name != "depend": + continue - return [ - self.parse_linked_name(li_name, allow_unknown) - for li_name in li_name_list - ] + if a.attrs["type"] == "require": + # Because the actions are not + # returned in a guaranteed + # order, the dependencies will + # have to be recorded for + # evaluation later. + targets.add(a.attrs["fmri"]) + elif a.attrs["type"] == "incorporate": + # Record incorporated packages. + tgt = fmri.PkgFmri(a.attrs["fmri"]) + tver = tgt.version + # incorporates without a version + # should be ignored. + if not tver: + continue + over = inc_vers.get(tgt.pkg_name, None) + + # In case this package has been + # incorporated more than once, + # use the newest version. + if over is not None and over > tver: + continue + inc_vers[tgt.pkg_name] = tver + except apx.InvalidPackageErrors: + # For mapping purposes, ignore unsupported + # (and invalid) actions. This is necessary so + # that API consumers can discover new package + # data that may be needed to perform an upgrade + # so that the API can understand them. 
+ pass + + if pkgr: + for f in targets: + tgt = fmri.PkgFmri(f) + ren_stems[tgt.pkg_name] = stem + ren_inst_stems.setdefault(stem, set()) + ren_inst_stems[stem].add(tgt.pkg_name) + + def check_stem(t, entry): + pub, stem, ver = t + if stem in inst_stems: + iver = inst_stems[stem] + if stem in ren_inst_stems or ver == iver: + # The package has been renamed + # or the entry is for the same + # version as that which is + # installed, so doesn't need + # to be checked. + return False + # The package may have been renamed in + # a newer version, so must be checked. + return True + elif stem in inc_vers: + # Package is incorporated, but not + # installed, so should be checked. + return True - def describe(self): - """Returns None if no plan is ready yet, otherwise returns - a PlanDescription.""" + tgt = ren_stems.get(stem, None) + while tgt is not None: + # This seems counter-intuitive, but + # for performance and other reasons, + # this stem should only be checked + # for a rename if it is incorporated + # or installed using a previous name. + if tgt in inst_stems or tgt in inc_vers: + return True + tgt = ren_stems.get(tgt, None) + + # Package should not be checked. + return False + + if not known_cat: + known_cat = self._img.get_catalog(self._img.IMG_CATALOG_KNOWN) + + # Find terminal rename entry for all known packages not + # rejected by check_stem(). + for t, entry, actions in known_cat.entry_actions( + cat_info, cb=check_stem, last=True + ): + pkgr = False + targets = set() + try: + for a in actions: + if a.name == "set" and a.attrs["name"] == "pkg.renamed": + pkgr = True + continue - return self.__plan_desc + if a.name != "depend": + continue - def prepare(self): - """Takes care of things which must be done before the plan can - be executed. This includes downloading the packages to disk and - preparing the indexes to be updated during execution. Should - only be called once a gen_plan_*() method has been called. If - a plan is abandoned after calling this method, reset() should - be called.""" + if a.attrs["type"] != "require": + continue - self._acquire_activity_lock() - try: - self._img.lock() - except: - self._activity_lock.release() - raise + # Because the actions are not + # returned in a guaranteed + # order, the dependencies will + # have to be recorded for + # evaluation later. + targets.add(a.attrs["fmri"]) + except apx.InvalidPackageErrors: + # For mapping purposes, ignore unsupported + # (and invalid) actions. This is necessary so + # that API consumers can discover new package + # data that may be needed to perform an upgrade + # so that the API can understand them. + pass + + if pkgr: + pub, stem, ver = t + for f in targets: + tgt = fmri.PkgFmri(f) + ren_stems[tgt.pkg_name] = stem + + # Determine highest ranked publisher for package stems + # listed in installed incorporations. + def pub_key(item): + return pub_ranks[item][0] + + for p in sorted(pub_ranks, key=pub_key): + if pubs and p not in pubs: + continue + for stem in known_cat.names(pubs=[p]): + if stem in inc_vers: + inc_stems.setdefault(stem, p) + + return ( + pub_ranks, + inc_stems, + inc_vers, + inst_stems, + ren_stems, + ren_inst_stems, + ) + + def __get_temp_repo_pubs(self, repos): + """Private helper function to retrieve publisher information + from list of temporary repositories. Caller is responsible + for locking.""" + + ret_pubs = [] + for repo_uri in repos: + if isinstance(repo_uri, six.string_types): + repo = publisher.RepositoryURI(repo_uri) + else: + # Already a RepositoryURI. 
+ repo = repo_uri + + pubs = None + try: + pubs = self._img.transport.get_publisherdata( + repo, ccancel=self.__check_cancel + ) + except apx.UnsupportedRepositoryOperation: + raise apx.RepoPubConfigUnavailable(location=str(repo)) + + if not pubs: + # Empty repository configuration. + raise apx.RepoPubConfigUnavailable(location=str(repo)) + + for p in pubs: + p.client_uuid = "transient" + psrepo = p.repository + if not psrepo: + # Repository configuration info wasn't + # provided, so assume origin is + # repo_uri. + p.repository = publisher.Repository(origins=[repo_uri]) + elif not psrepo.origins: + # Repository configuration was provided, + # but without an origin. Assume the + # repo_uri is the origin. + psrepo.add_origin(repo_uri) + elif repo not in psrepo.origins: + # If the repo_uri used is not + # in the list of sources, then + # add it as the first origin. + psrepo.origins.insert(0, repo) + ret_pubs.extend(pubs) + + return sorted(ret_pubs) + + def __get_alt_pkg_data(self, repos): + """Private helper function to retrieve composite known and + installed catalog and package repository map for temporary + set of package repositories. Returns (pkg_pub_map, alt_pubs, + known_cat, inst_cat).""" + + repos = set(repos) + eid = ",".join(sorted(map(str, repos))) + try: + return self.__alt_sources[eid] + except KeyError: + # Need to cache new set of alternate sources. + pass + + img_inst_cat = self._img.get_catalog(self._img.IMG_CATALOG_INSTALLED) + img_inst_base = img_inst_cat.get_part("catalog.base.C", must_exist=True) + op_time = datetime.datetime.utcnow() + pubs = self.__get_temp_repo_pubs(repos) + progtrack = self.__progresstracker + + # Create temporary directories. + tmpdir = tempfile.mkdtemp() + + pkg_repos = {} + pkg_pub_map = {} + # Too many nested blocks; + # pylint: disable=R0101 + try: + progtrack.refresh_start(len(pubs), full_refresh=False) + failed = [] + pub_cats = [] + for pub in pubs: + # Assign a temporary meta root to each + # publisher. + meta_root = os.path.join(tmpdir, str(id(pub))) + misc.makedirs(meta_root) + pub.meta_root = meta_root + pub.transport = self._img.transport + repo = pub.repository + pkg_repos[id(repo)] = repo + # Retrieve each publisher's catalog. + progtrack.refresh_start_pub(pub) try: - if not self._img.imageplan: - raise apx.PlanMissingException() - - if not self.__planned_children: - # if we never planned children images then we - # didn't finish planning. - raise apx.PlanMissingException() - - if self.__prepared: - raise apx.AlreadyPreparedException() - - self._enable_cancel() - - try: - self._img.imageplan.preexecute() - except search_errors.ProblematicPermissionsIndexException as e: - raise apx.ProblematicPermissionsIndexException(e) - except: - raise - - self._disable_cancel() - self.__prepared = True - except apx.CanceledException as e: - self._cancel_done() - if self._img.history.operation_name: - # If an operation is in progress, log - # the error and mark its end. - self.log_operation_end(error=e) - raise - except Exception as e: - self._cancel_cleanup_exception() - if self._img.history.operation_name: - # If an operation is in progress, log - # the error and mark its end. - self.log_operation_end(error=e) - raise - except: - # Handle exceptions that are not subclasses of - # Exception. - self._cancel_cleanup_exception() - if self._img.history.operation_name: - # If an operation is in progress, log - # the error and mark its end. 
- exc_type, exc_value, exc_traceback = \ - sys.exc_info() - self.log_operation_end(error=exc_type) - raise + pub.refresh() + except apx.PermissionsException as e: + failed.append((pub, e)) + # No point in continuing since no data + # can be written. + break + except apx.ApiException as e: + failed.append((pub, e)) + continue finally: - self._img.cleanup_downloads() - self._img.unlock() - try: - if int(os.environ.get("PKG_DUMP_STATS", 0)) > 0: - self._img.transport.stats.dump() - except ValueError: - # Don't generate stats if an invalid value - # is supplied. - pass - self._activity_lock.release() - - self._img.linked.api_recurse_prepare(self.__progresstracker) - - def execute_plan(self): - """Executes the plan. This is uncancelable once it begins. - Should only be called after the prepare method has been - called. After plan execution, reset() should be called.""" - - self._acquire_activity_lock() - try: - self._disable_cancel() - self._img.lock() - except: - self._activity_lock.release() - raise + progtrack.refresh_end_pub(pub) + pub_cats.append((pub.prefix, repo, pub.catalog)) - try: - if not self._img.imageplan: - raise apx.PlanMissingException() - - if not self.__prepared: - raise apx.PrematureExecutionException() - - if self.__executed: - raise apx.AlreadyExecutedException() - - try: - be = bootenv.BootEnv(self._img, - self.__progresstracker) - except RuntimeError: - be = bootenv.BootEnvNull(self._img) - self._img.bootenv = be - - if not self.__new_be and \ - self._img.imageplan.reboot_needed() and \ - self._img.is_liveroot(): - e = apx.RebootNeededOnLiveImageException() - self.log_operation_end(error=e) - raise e - - # Before proceeding, create a backup boot environment if - # requested. - if self.__backup_be: - try: - be.create_backup_be( - be_name=self.__backup_be_name) - except Exception as e: - self.log_operation_end(error=e) - raise - except: - # Handle exceptions that are not - # subclasses of Exception. - exc_type, exc_value, exc_traceback = \ - sys.exc_info() - self.log_operation_end(error=exc_type) - raise - - # After (possibly) creating backup be, determine if - # operation should execute on a clone of current BE. - if self.__new_be: - try: - be.init_image_recovery(self._img, - self.__be_name) - except Exception as e: - self.log_operation_end(error=e) - raise - except: - # Handle exceptions that are not - # subclasses of Exception. - exc_type, exc_value, exc_traceback = \ - sys.exc_info() - self.log_operation_end(error=exc_type) - raise - # check if things gained underneath us - if self._img.is_liveroot(): - e = apx.UnableToCopyBE() - self.log_operation_end(error=e) - raise e - - raise_later = None - - # we're about to execute a plan so change our current - # working directory to / so that we won't fail if we - # try to remove our current working directory - os.chdir(os.sep) - - try: - try: - self._img.imageplan.execute() - except apx.WrapIndexingException as e: - raise_later = e - - if not self._img.linked.nothingtodo(): - self._img.linked.syncmd() - except RuntimeError as e: - if self.__new_be: - be.restore_image() - else: - be.restore_install_uninstall() - # Must be done after bootenv restore. 
- self.log_operation_end(error=e) - raise - except search_errors.IndexLockedException as e: - error = apx.IndexLockedException(e) - self.log_operation_end(error=error) - raise error - except search_errors.ProblematicPermissionsIndexException as e: - error = apx.ProblematicPermissionsIndexException(e) - self.log_operation_end(error=error) - raise error - except search_errors.InconsistentIndexException as e: - error = apx.CorruptedIndexException(e) - self.log_operation_end(error=error) - raise error - except NonzeroExitException as e: - # Won't happen during update - be.restore_install_uninstall() - error = apx.ActuatorException(e) - self.log_operation_end(error=error) - raise error - except apx.InvalidMediatorTarget as e: - # Mount a new BE but do not activate it in case the - # missing mediator target will cause a broken system. - # Allows the admin to take the appropriate action. - if self.__new_be: - be.restore_image() - self.log_operation_end(error=e) - raise e - except Exception as e: - if self.__new_be: - be.restore_image() - else: - be.restore_install_uninstall() - # Must be done after bootenv restore. - self.log_operation_end(error=e) - raise - except: - # Handle exceptions that are not subclasses of - # Exception. - exc_type, exc_value, exc_traceback = \ - sys.exc_info() - - if self.__new_be: - be.restore_image() - else: - be.restore_install_uninstall() - # Must be done after bootenv restore. - self.log_operation_end(error=exc_type) - raise - - self._img.linked.api_recurse_execute( - self.__progresstracker) - - self.__finished_execution(be) - if raise_later: - raise raise_later + progtrack.refresh_done() - finally: - self._img.cleanup_downloads() - if self._img.locked: - self._img.unlock() - self._activity_lock.release() - - def __finished_execution(self, be): - if self._img.imageplan.state != plandesc.EXECUTED_OK: - if self.__new_be: - be.restore_image() - else: - be.restore_install_uninstall() - - error = apx.ImageplanStateException( - self._img.imageplan.state) - # Must be done after bootenv restore. - self.log_operation_end(error=error) - raise error - - if self._img.imageplan.boot_archive_needed() or \ - self.__new_be: - be.update_boot_archive() - - self._img.hotfix_origin_cleanup() + if failed: + total = len(pub_cats) + len(failed) + e = apx.CatalogRefreshException(failed, total, len(pub_cats)) + raise e - if self.__new_be: - be.activate_image(set_active=self.__be_activate) + # Determine upgradability. + newest = {} + for pfx, repo, cat in [(None, None, img_inst_cat)] + pub_cats: + if pfx: + pkg_list = cat.fmris(last=True, pubs=[pfx]) else: - be.activate_install_uninstall() - self._img.cleanup_cached_content( - progtrack=self.__progresstracker) - # If the end of the operation wasn't already logged - # by one of the previous operations, then log it as - # ending now. - if self._img.history.operation_name: - self.log_operation_end(release_notes= - self._img.imageplan.pd.release_notes_name) - self.__executed = True - - def set_plan_license_status(self, pfmri, plicense, accepted=None, - displayed=None): - """Sets the license status for the given package FMRI and - license entry. 
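A hedged sketch of driving this license API after planning: the get_licenses() iteration and LicenseInfo attributes shown here are assumed from elsewhere in this module, and the accepted/displayed semantics are spelled out just below:

    # Sketch: walk the current plan's license entries and accept any license
    # that must be accepted, marking it displayed at the same time.
    plan = api_inst.describe()
    for dest_fmri, src_lic, dest_lic, acc, disp in plan.get_licenses():
        if dest_lic.must_accept and not acc:
            api_inst.set_plan_license_status(
                dest_fmri, dest_lic.license, accepted=True, displayed=True
            )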
- - 'accepted' is an optional parameter that can be one of three - values: - None leaves accepted status unchanged - False sets accepted status to False - True sets accepted status to True - - 'displayed' is an optional parameter that can be one of three - values: - None leaves displayed status unchanged - False sets displayed status to False - True sets displayed status to True""" - - self._acquire_activity_lock() - try: - try: - self._disable_cancel() - except apx.CanceledException: - self._cancel_done() - raise - - if not self._img.imageplan: - raise apx.PlanMissingException() - - for pp in self.__plan_desc.pkg_plans: - if pp.destination_fmri == pfmri: - pp.set_license_status(plicense, - accepted=accepted, - displayed=displayed) - break - finally: - self._activity_lock.release() - - def refresh(self, full_refresh=False, pubs=None, immediate=False, - ignore_unreachable=True): - """Refreshes the metadata (e.g. catalog) for one or more - publishers. - - 'full_refresh' is an optional boolean value indicating whether - a full retrieval of publisher metadata (e.g. catalogs) or only - an update to the existing metadata should be performed. When - True, 'immediate' is also set to True. - - 'pubs' is a list of publisher prefixes or publisher objects - to refresh. Passing an empty list or using the default value - implies all publishers. - - 'immediate' is an optional boolean value indicating whether - a refresh should occur now. If False, a publisher's selected - repository will only be checked for updates if the update - interval period recorded in the image configuration has been - exceeded. - - 'ignore_unreachable' is an optional boolean value indicating - whether unreachable repositories should be ignored. If True, - errors contacting this repository are stored in the transport - but no exception is raised, allowing an operation to continue - if an unneeded repository is not online. - - Currently returns an image object, allowing existing code to - work while the rest of the API is put into place.""" - - self._acquire_activity_lock() - try: - self._disable_cancel() - self._img.lock() - try: - self.__refresh(full_refresh=full_refresh, - pubs=pubs, - ignore_unreachable=ignore_unreachable, - immediate=immediate) - return self._img - finally: - self._img.unlock() - self._img.cleanup_downloads() - except apx.CanceledException: - self._cancel_done() - raise - finally: - try: - if int(os.environ.get("PKG_DUMP_STATS", 0)) > 0: - self._img.transport.stats.dump() - except ValueError: - # Don't generate stats if an invalid value - # is supplied. - pass - self._activity_lock.release() - - def __refresh(self, full_refresh=False, pubs=None, immediate=False, - ignore_unreachable=True): - """Private refresh method; caller responsible for locking and - cleanup.""" - - self._img.refresh_publishers(full_refresh=full_refresh, - ignore_unreachable=ignore_unreachable, - immediate=immediate, pubs=pubs, - progtrack=self.__progresstracker) - - def __licenses(self, pfmri, mfst, alt_pub=None): - """Private function. 
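The refresh() docstring above (unchanged by this reformatting) gives the public contract: publisher metadata is updated either on demand or only once the configured update interval has elapsed. A minimal, illustrative usage sketch, assuming an already constructed ImageInterface instance api_inst (how it is built is outside this diff); refresh_selected is a hypothetical wrapper name.

def refresh_selected(api_inst, prefixes=None):
    # Force an immediate incremental refresh for the given publisher
    # prefixes; None means all publishers, per the docstring above.
    return api_inst.refresh(
        full_refresh=False,
        pubs=prefixes,
        immediate=True,
        ignore_unreachable=True,
    )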
Returns the license info from the - manifest mfst.""" - license_lst = [] - for lic in mfst.gen_actions_by_type("license"): - license_lst.append(LicenseInfo(pfmri, lic, - img=self._img, alt_pub=alt_pub)) - return license_lst - - @_LockedCancelable() - def get_pkg_categories(self, installed=False, pubs=misc.EmptyI, - repos=None): - """Returns an ordered list of tuples of the form (scheme, - category) containing the names of all categories in use by - the last version of each unique package in the catalog on a - per-publisher basis. - - 'installed' is an optional boolean value indicating whether - only the categories used by currently installed packages - should be returned. If False, the categories used by the - latest vesion of every known package will be returned - instead. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to. - - 'repos' is a list of URI strings or RepositoryURI objects that - represent the locations of package repositories to list packages - for. - """ - - if installed: - excludes = misc.EmptyI - else: - excludes = self._img.list_excludes() - - if repos: - ignored, ignored, known_cat, inst_cat = \ - self.__get_alt_pkg_data(repos) + pkg_list = cat.fmris(last=True) + + for f in pkg_list: + nver, snver = newest.get(f.pkg_name, (None, None)) + if f.version > nver: + newest[f.pkg_name] = (f.version, str(f.version)) + + # Build list of installed packages. + inst_stems = {} + for t, entry in img_inst_cat.tuple_entries(): + states = entry["metadata"]["states"] + if pkgdefs.PKG_STATE_INSTALLED not in states: + continue + pub, stem, ver = t + inst_stems.setdefault(pub, {}) + inst_stems[pub].setdefault(stem, {}) + inst_stems[pub][stem][ver] = False + + # Now create composite known and installed catalogs. + compicat = pkg.catalog.Catalog(batch_mode=True, sign=False) + compkcat = pkg.catalog.Catalog(batch_mode=True, sign=False) + + sparts = ( + (pfx, cat, repo, name, cat.get_part(name, must_exist=True)) + for pfx, repo, cat in pub_cats + for name in cat.parts + ) + + excludes = self._img.list_excludes() + proc_stems = {} + for pfx, cat, repo, name, spart in sparts: + # 'spart' is the source part. + if spart is None: + # Client hasn't retrieved this part. + continue + + # New known part. + nkpart = compkcat.get_part(name) + nipart = compicat.get_part(name) + base = name.startswith("catalog.base.") + + # Avoid accessor overhead since these will be + # used for every entry. + cat_ver = cat.version + dp = cat.get_part("catalog.dependency.C", must_exist=True) + + for t, sentry in spart.tuple_entries(pubs=[pfx]): + pub, stem, ver = t + + pkg_pub_map.setdefault(pub, {}) + pkg_pub_map[pub].setdefault(stem, {}) + pkg_pub_map[pub][stem].setdefault(ver, set()) + pkg_pub_map[pub][stem][ver].add(id(repo)) + + if ( + pub in proc_stems + and stem in proc_stems[pub] + and ver in proc_stems[pub][stem] + ): + if id(cat) != proc_stems[pub][stem][ver]: + # Already added from another + # catalog. + continue + else: + proc_stems.setdefault(pub, {}) + proc_stems[pub].setdefault(stem, {}) + proc_stems[pub][stem][ver] = id(cat) + + installed = False + if ( + pub in inst_stems + and stem in inst_stems[pub] + and ver in inst_stems[pub][stem] + ): + installed = True + inst_stems[pub][stem][ver] = True + + # copy() is too slow here and catalog + # entries are shallow so this should be + # sufficient. + entry = dict(six.iteritems(sentry)) + if not base: + # Nothing else to do except add + # the entry for non-base catalog + # parts. 
+ nkpart.add( + metadata=entry, + op_time=op_time, + pub=pub, + stem=stem, + ver=ver, + ) if installed: - pkg_cat = inst_cat - else: - pkg_cat = known_cat - elif installed: - pkg_cat = self._img.get_catalog( - self._img.IMG_CATALOG_INSTALLED) - else: - pkg_cat = self._img.get_catalog( - self._img.IMG_CATALOG_KNOWN) - return sorted(pkg_cat.categories(excludes=excludes, pubs=pubs)) - - def __map_installed_newest(self, pubs, known_cat=None): - """Private function. Maps incorporations and publisher - relationships for installed packages and returns them - as a tuple of (pub_ranks, inc_stems, inc_vers, inst_stems, - ren_stems, ren_inst_stems). - """ + nipart.add( + metadata=entry, + op_time=op_time, + pub=pub, + stem=stem, + ver=ver, + ) + continue - img_cat = self._img.get_catalog( - self._img.IMG_CATALOG_INSTALLED) - cat_info = frozenset([img_cat.DEPENDENCY]) + # Only the base catalog part stores + # package state information and/or + # other metadata. + mdata = {} + if installed: + mdata = dict( + img_inst_base.get_entry( + pub=pub, stem=stem, ver=ver + )["metadata"] + ) - inst_stems = {} - ren_inst_stems = {} - ren_stems = {} + entry["metadata"] = mdata + + states = [ + pkgdefs.PKG_STATE_KNOWN, + pkgdefs.PKG_STATE_ALT_SOURCE, + ] + if cat_ver == 0: + states.append(pkgdefs.PKG_STATE_V0) + else: + # Assume V1 catalog source. + states.append(pkgdefs.PKG_STATE_V1) + + if installed: + states.append(pkgdefs.PKG_STATE_INSTALLED) + + nver, snver = newest.get(stem, (None, None)) + if snver is not None and ver != snver: + states.append(pkgdefs.PKG_STATE_UPGRADABLE) + + # Determine if package is obsolete or + # has been renamed and mark with + # appropriate state. + dpent = None + if dp is not None: + dpent = dp.get_entry(pub=pub, stem=stem, ver=ver) + if dpent is not None: + for a in dpent["actions"]: + # Constructing action + # objects for every + # action would be a lot + # slower, so a simple + # string match is done + # first so that only + # interesting actions + # get constructed. + if not a.startswith("set"): + continue + if not ( + "pkg.obsolete" in a + or "pkg.renamed" in a + or "pkg.legacy" in a + ): + continue - inc_stems = {} - inc_vers = {} + try: + act = pkg.actions.fromstr(a) + except pkg.actions.ActionError: + # If the action can't be + # parsed or is not yet + # supported, continue. + continue - pub_ranks = self._img.get_publisher_ranks() + if act.attrs["value"].lower() != "true": + continue - # The incorporation list should include all installed, - # incorporated packages from all publishers. - for t in img_cat.entry_actions(cat_info): - (pub, stem, ver), entry, actions = t + if act.attrs["name"] == "pkg.obsolete": + states.append(pkgdefs.PKG_STATE_OBSOLETE) + elif act.attrs["name"] == "pkg.renamed": + if not act.include_this( + excludes, publisher=pub + ): + continue + states.append(pkgdefs.PKG_STATE_RENAMED) + elif act.attrs["name"] == "pkg.legacy": + states.append(pkgdefs.PKG_STATE_LEGACY) + + mdata["states"] = states + + # Add base entries. 
+ nkpart.add( + metadata=entry, + op_time=op_time, + pub=pub, + stem=stem, + ver=ver, + ) + if installed: + nipart.add( + metadata=entry, + op_time=op_time, + pub=pub, + stem=stem, + ver=ver, + ) - inst_stems[stem] = ver - pkgr = False - targets = set() - try: - for a in actions: - if a.name == "set" and \ - a.attrs["name"] == "pkg.renamed": - pkgr = True - continue - elif a.name != "depend": - continue - - if a.attrs["type"] == "require": - # Because the actions are not - # returned in a guaranteed - # order, the dependencies will - # have to be recorded for - # evaluation later. - targets.add(a.attrs["fmri"]) - elif a.attrs["type"] == "incorporate": - # Record incorporated packages. - tgt = fmri.PkgFmri( - a.attrs["fmri"]) - tver = tgt.version - # incorporates without a version - # should be ignored. - if not tver: - continue - over = inc_vers.get( - tgt.pkg_name, None) - - # In case this package has been - # incorporated more than once, - # use the newest version. - if over is not None and \ - over > tver: - continue - inc_vers[tgt.pkg_name] = tver - except apx.InvalidPackageErrors: - # For mapping purposes, ignore unsupported - # (and invalid) actions. This is necessary so - # that API consumers can discover new package - # data that may be needed to perform an upgrade - # so that the API can understand them. - pass - - if pkgr: - for f in targets: - tgt = fmri.PkgFmri(f) - ren_stems[tgt.pkg_name] = stem - ren_inst_stems.setdefault(stem, - set()) - ren_inst_stems[stem].add( - tgt.pkg_name) - - def check_stem(t, entry): - pub, stem, ver = t - if stem in inst_stems: - iver = inst_stems[stem] - if stem in ren_inst_stems or \ - ver == iver: - # The package has been renamed - # or the entry is for the same - # version as that which is - # installed, so doesn't need - # to be checked. - return False - # The package may have been renamed in - # a newer version, so must be checked. - return True - elif stem in inc_vers: - # Package is incorporated, but not - # installed, so should be checked. - return True - - tgt = ren_stems.get(stem, None) - while tgt is not None: - # This seems counter-intuitive, but - # for performance and other reasons, - # this stem should only be checked - # for a rename if it is incorporated - # or installed using a previous name. - if tgt in inst_stems or \ - tgt in inc_vers: - return True - tgt = ren_stems.get(tgt, None) - - # Package should not be checked. - return False - - if not known_cat: - known_cat = self._img.get_catalog( - self._img.IMG_CATALOG_KNOWN) - - # Find terminal rename entry for all known packages not - # rejected by check_stem(). - for t, entry, actions in known_cat.entry_actions(cat_info, - cb=check_stem, last=True): - pkgr = False - targets = set() - try: - for a in actions: - if a.name == "set" and \ - a.attrs["name"] == "pkg.renamed": - pkgr = True - continue - - if a.name != "depend": - continue - - if a.attrs["type"] != "require": - continue - - # Because the actions are not - # returned in a guaranteed - # order, the dependencies will - # have to be recorded for - # evaluation later. - targets.add(a.attrs["fmri"]) - except apx.InvalidPackageErrors: - # For mapping purposes, ignore unsupported - # (and invalid) actions. This is necessary so - # that API consumers can discover new package - # data that may be needed to perform an upgrade - # so that the API can understand them. 
- pass - - if pkgr: - pub, stem, ver = t - for f in targets: - tgt = fmri.PkgFmri(f) - ren_stems[tgt.pkg_name] = stem - - # Determine highest ranked publisher for package stems - # listed in installed incorporations. - def pub_key(item): - return pub_ranks[item][0] - - for p in sorted(pub_ranks, key=pub_key): - if pubs and p not in pubs: - continue - for stem in known_cat.names(pubs=[p]): - if stem in inc_vers: - inc_stems.setdefault(stem, p) - - return (pub_ranks, inc_stems, inc_vers, inst_stems, ren_stems, - ren_inst_stems) - - def __get_temp_repo_pubs(self, repos): - """Private helper function to retrieve publisher information - from list of temporary repositories. Caller is responsible - for locking.""" - - ret_pubs = [] - for repo_uri in repos: - if isinstance(repo_uri, six.string_types): - repo = publisher.RepositoryURI(repo_uri) - else: - # Already a RepositoryURI. - repo = repo_uri - - pubs = None - try: - pubs = self._img.transport.get_publisherdata( - repo, ccancel=self.__check_cancel) - except apx.UnsupportedRepositoryOperation: - raise apx.RepoPubConfigUnavailable( - location=str(repo)) - - if not pubs: - # Empty repository configuration. - raise apx.RepoPubConfigUnavailable( - location=str(repo)) - - for p in pubs: - p.client_uuid = "transient" - psrepo = p.repository - if not psrepo: - # Repository configuration info wasn't - # provided, so assume origin is - # repo_uri. - p.repository = publisher.Repository( - origins=[repo_uri]) - elif not psrepo.origins: - # Repository configuration was provided, - # but without an origin. Assume the - # repo_uri is the origin. - psrepo.add_origin(repo_uri) - elif repo not in psrepo.origins: - # If the repo_uri used is not - # in the list of sources, then - # add it as the first origin. - psrepo.origins.insert(0, repo) - ret_pubs.extend(pubs) - - return sorted(ret_pubs) - - def __get_alt_pkg_data(self, repos): - """Private helper function to retrieve composite known and - installed catalog and package repository map for temporary - set of package repositories. Returns (pkg_pub_map, alt_pubs, - known_cat, inst_cat).""" - - repos = set(repos) - eid = ",".join(sorted(map(str, repos))) + pub_map = {} + for pub in pubs: try: - return self.__alt_sources[eid] + opub = pub_map[pub.prefix] except KeyError: - # Need to cache new set of alternate sources. - pass - - img_inst_cat = self._img.get_catalog( - self._img.IMG_CATALOG_INSTALLED) - img_inst_base = img_inst_cat.get_part("catalog.base.C", - must_exist=True) - op_time = datetime.datetime.utcnow() - pubs = self.__get_temp_repo_pubs(repos) - progtrack = self.__progresstracker - - # Create temporary directories. - tmpdir = tempfile.mkdtemp() - - pkg_repos = {} - pkg_pub_map = {} - # Too many nested blocks; - # pylint: disable=R0101 - try: - progtrack.refresh_start(len(pubs), full_refresh=False) - failed = [] - pub_cats = [] - for pub in pubs: - # Assign a temporary meta root to each - # publisher. - meta_root = os.path.join(tmpdir, str(id(pub))) - misc.makedirs(meta_root) - pub.meta_root = meta_root - pub.transport = self._img.transport - repo = pub.repository - pkg_repos[id(repo)] = repo - - # Retrieve each publisher's catalog. - progtrack.refresh_start_pub(pub) - try: - pub.refresh() - except apx.PermissionsException as e: - failed.append((pub, e)) - # No point in continuing since no data - # can be written. 
- break - except apx.ApiException as e: - failed.append((pub, e)) - continue - finally: - progtrack.refresh_end_pub(pub) - pub_cats.append(( - pub.prefix, - repo, - pub.catalog - )) - - progtrack.refresh_done() - - if failed: - total = len(pub_cats) + len(failed) - e = apx.CatalogRefreshException(failed, total, - len(pub_cats)) - raise e - - # Determine upgradability. - newest = {} - for pfx, repo, cat in [(None, None, img_inst_cat)] + \ - pub_cats: - if pfx: - pkg_list = cat.fmris(last=True, - pubs=[pfx]) - else: - pkg_list = cat.fmris(last=True) - - for f in pkg_list: - nver, snver = newest.get(f.pkg_name, - (None, None)) - if f.version > nver: - newest[f.pkg_name] = (f.version, - str(f.version)) - - # Build list of installed packages. - inst_stems = {} - for t, entry in img_inst_cat.tuple_entries(): - states = entry["metadata"]["states"] - if pkgdefs.PKG_STATE_INSTALLED not in states: - continue - pub, stem, ver = t - inst_stems.setdefault(pub, {}) - inst_stems[pub].setdefault(stem, {}) - inst_stems[pub][stem][ver] = False - - # Now create composite known and installed catalogs. - compicat = pkg.catalog.Catalog(batch_mode=True, - sign=False) - compkcat = pkg.catalog.Catalog(batch_mode=True, - sign=False) - - sparts = ( - (pfx, cat, repo, name, cat.get_part(name, must_exist=True)) - for pfx, repo, cat in pub_cats - for name in cat.parts - ) - - excludes = self._img.list_excludes() - proc_stems = {} - for pfx, cat, repo, name, spart in sparts: - # 'spart' is the source part. - if spart is None: - # Client hasn't retrieved this part. - continue - - # New known part. - nkpart = compkcat.get_part(name) - nipart = compicat.get_part(name) - base = name.startswith("catalog.base.") - - # Avoid accessor overhead since these will be - # used for every entry. - cat_ver = cat.version - dp = cat.get_part("catalog.dependency.C", - must_exist=True) - - for t, sentry in spart.tuple_entries(pubs=[pfx]): - pub, stem, ver = t - - pkg_pub_map.setdefault(pub, {}) - pkg_pub_map[pub].setdefault(stem, {}) - pkg_pub_map[pub][stem].setdefault(ver, - set()) - pkg_pub_map[pub][stem][ver].add( - id(repo)) - - if pub in proc_stems and \ - stem in proc_stems[pub] and \ - ver in proc_stems[pub][stem]: - if id(cat) != proc_stems[pub][stem][ver]: - # Already added from another - # catalog. - continue - else: - proc_stems.setdefault(pub, {}) - proc_stems[pub].setdefault(stem, - {}) - proc_stems[pub][stem][ver] = \ - id(cat) - - installed = False - if pub in inst_stems and \ - stem in inst_stems[pub] and \ - ver in inst_stems[pub][stem]: - installed = True - inst_stems[pub][stem][ver] = \ - True - - # copy() is too slow here and catalog - # entries are shallow so this should be - # sufficient. - entry = dict(six.iteritems(sentry)) - if not base: - # Nothing else to do except add - # the entry for non-base catalog - # parts. - nkpart.add(metadata=entry, - op_time=op_time, pub=pub, - stem=stem, ver=ver) - if installed: - nipart.add( - metadata=entry, - op_time=op_time, - pub=pub, stem=stem, - ver=ver) - continue - - # Only the base catalog part stores - # package state information and/or - # other metadata. - mdata = {} - if installed: - mdata = dict( - img_inst_base.get_entry( - pub=pub, stem=stem, - ver=ver)["metadata"]) - - entry["metadata"] = mdata - - states = [pkgdefs.PKG_STATE_KNOWN, - pkgdefs.PKG_STATE_ALT_SOURCE] - if cat_ver == 0: - states.append( - pkgdefs.PKG_STATE_V0) - else: - # Assume V1 catalog source. 
- states.append( - pkgdefs.PKG_STATE_V1) - - if installed: - states.append( - pkgdefs.PKG_STATE_INSTALLED) - - nver, snver = newest.get(stem, - (None, None)) - if snver is not None and ver != snver: - states.append( - pkgdefs.PKG_STATE_UPGRADABLE) - - # Determine if package is obsolete or - # has been renamed and mark with - # appropriate state. - dpent = None - if dp is not None: - dpent = dp.get_entry(pub=pub, - stem=stem, ver=ver) - if dpent is not None: - for a in dpent["actions"]: - # Constructing action - # objects for every - # action would be a lot - # slower, so a simple - # string match is done - # first so that only - # interesting actions - # get constructed. - if not a.startswith("set"): - continue - if not ("pkg.obsolete" in a or \ - "pkg.renamed" in a or \ - "pkg.legacy" in a): - continue - - try: - act = pkg.actions.fromstr(a) - except pkg.actions.ActionError: - # If the action can't be - # parsed or is not yet - # supported, continue. - continue - - if act.attrs["value"].lower() != "true": - continue - - if act.attrs["name"] == "pkg.obsolete": - states.append( - pkgdefs.PKG_STATE_OBSOLETE) - elif act.attrs["name"] == "pkg.renamed": - if not act.include_this( - excludes, publisher=pub): - continue - states.append( - pkgdefs.PKG_STATE_RENAMED) - elif act.attrs["name"] == "pkg.legacy": - states.append( - pkgdefs.PKG_STATE_LEGACY) - - mdata["states"] = states - - # Add base entries. - nkpart.add(metadata=entry, - op_time=op_time, pub=pub, stem=stem, - ver=ver) - if installed: - nipart.add(metadata=entry, - op_time=op_time, pub=pub, - stem=stem, ver=ver) - - pub_map = {} - for pub in pubs: - try: - opub = pub_map[pub.prefix] - except KeyError: - nrepo = publisher.Repository() - opub = publisher.Publisher(pub.prefix, - catalog=compkcat, repository=nrepo) - pub_map[pub.prefix] = opub - - rid_map = {} - for pub in pkg_pub_map: - for stem in pkg_pub_map[pub]: - for ver in pkg_pub_map[pub][stem]: - rids = tuple(sorted( - pkg_pub_map[pub][stem][ver])) - - if rids not in rid_map: - # Create a publisher and - # repository for this - # unique set of origins. - origins = [] - list(map(origins.extend, [ - pkg_repos.get(rid).origins - for rid in rids - ])) - npub = \ - copy.copy(pub_map[pub]) - nrepo = npub.repository - nrepo.origins = origins - assert npub.catalog == \ - compkcat - rid_map[rids] = npub - - pkg_pub_map[pub][stem][ver] = \ - rid_map[rids] - - # Now consolidate all origins for each publisher under - # a single repository object for the caller. - for pub in pubs: - npub = pub_map[pub.prefix] - nrepo = npub.repository - for o in pub.repository.origins: - if not nrepo.has_origin(o): - nrepo.add_origin(o) - assert npub.catalog == compkcat - - for compcat in (compicat, compkcat): - compcat.batch_mode = False - compcat.finalize() - compcat.read_only = True - - # Cache these for future callers. 
- self.__alt_sources[eid] = (pkg_pub_map, - sorted(pub_map.values()), compkcat, compicat) - return self.__alt_sources[eid] - finally: - shutil.rmtree(tmpdir, ignore_errors=True) - self._img.cleanup_downloads() - - @_LockedGenerator() - def get_pkg_list(self, pkg_list, cats=None, collect_attrs=False, - patterns=misc.EmptyI, pubs=misc.EmptyI, raise_unmatched=False, - ranked=False, repos=None, return_fmris=False, variants=False): - """A generator function that produces tuples of the form: - - ( - ( - pub, - (string) the publisher of the package - stem, - (string) the name of the package - version - (string) the version of the package - ), - summary, - (string) the package summary - categories, - (list) string tuples of (scheme, category) - states, - (list) PackageInfo states - attributes - (dict) package attributes + nrepo = publisher.Repository() + opub = publisher.Publisher( + pub.prefix, catalog=compkcat, repository=nrepo ) + pub_map[pub.prefix] = opub + + rid_map = {} + for pub in pkg_pub_map: + for stem in pkg_pub_map[pub]: + for ver in pkg_pub_map[pub][stem]: + rids = tuple(sorted(pkg_pub_map[pub][stem][ver])) + + if rids not in rid_map: + # Create a publisher and + # repository for this + # unique set of origins. + origins = [] + list( + map( + origins.extend, + [ + pkg_repos.get(rid).origins + for rid in rids + ], + ) + ) + npub = copy.copy(pub_map[pub]) + nrepo = npub.repository + nrepo.origins = origins + assert npub.catalog == compkcat + rid_map[rids] = npub + + pkg_pub_map[pub][stem][ver] = rid_map[rids] + + # Now consolidate all origins for each publisher under + # a single repository object for the caller. + for pub in pubs: + npub = pub_map[pub.prefix] + nrepo = npub.repository + for o in pub.repository.origins: + if not nrepo.has_origin(o): + nrepo.add_origin(o) + assert npub.catalog == compkcat + + for compcat in (compicat, compkcat): + compcat.batch_mode = False + compcat.finalize() + compcat.read_only = True + + # Cache these for future callers. + self.__alt_sources[eid] = ( + pkg_pub_map, + sorted(pub_map.values()), + compkcat, + compicat, + ) + return self.__alt_sources[eid] + finally: + shutil.rmtree(tmpdir, ignore_errors=True) + self._img.cleanup_downloads() + + @_LockedGenerator() + def get_pkg_list( + self, + pkg_list, + cats=None, + collect_attrs=False, + patterns=misc.EmptyI, + pubs=misc.EmptyI, + raise_unmatched=False, + ranked=False, + repos=None, + return_fmris=False, + variants=False, + ): + """A generator function that produces tuples of the form: + + ( + ( + pub, - (string) the publisher of the package + stem, - (string) the name of the package + version - (string) the version of the package + ), + summary, - (string) the package summary + categories, - (list) string tuples of (scheme, category) + states, - (list) PackageInfo states + attributes - (dict) package attributes + ) + + Results are always sorted by stem, publisher, and then in + descending version order. + + 'pkg_list' is one of the following constant values indicating + what base set of package data should be used for results: + + LIST_ALL + All known packages. + + LIST_INSTALLED + Installed packages. + + LIST_INSTALLED_NEWEST + Installed packages and the newest + versions of packages not installed. + Renamed packages that are listed in + an installed incorporation will be + excluded unless they are installed. + + LIST_NEWEST + The newest versions of all known packages + that match the provided patterns and + other criteria. + + LIST_UPGRADABLE + Packages that are installed and upgradable. 
+ + LIST_REMOVABLE + Packages that have no dependants + + 'cats' is an optional list of package category tuples of the + form (scheme, cat) to restrict the results to. If a package + is assigned to any of the given categories, it will be + returned. A value of [] will return packages not assigned + to any package category. A value of None indicates that no + package category filtering should be applied. + + 'collect_attrs' is an optional boolean that indicates whether + all package attributes should be collected and returned in the + fifth element of the return tuple. If False, that element will + be an empty dictionary. + + 'patterns' is an optional list of FMRI wildcard strings to + filter results by. + + 'pubs' is an optional list of publisher prefixes to restrict + the results to. + + 'raise_unmatched' is an optional boolean value that indicates + whether an InventoryException should be raised if any patterns + (after applying all other filtering and returning all results) + didn't match any packages. + + 'ranked' is an optional boolean value that indicates whether + only the matching package versions from the highest-ranked + publisher should be returned. This option is ignored for + patterns that explicitly specify the publisher to match. + + 'repos' is a list of URI strings or RepositoryURI objects that + represent the locations of package repositories to list packages + for. + + 'return_fmris' is an optional boolean value that indicates that + an FMRI object should be returned in place of the (pub, stem, + ver) tuple that is normally returned. + + 'variants' is an optional boolean value that indicates that + packages that are for arch or zone variants not applicable to + this image should be returned. + + Please note that this function may invoke network operations + to retrieve the requested package information.""" + + return self.__get_pkg_list( + pkg_list, + cats=cats, + collect_attrs=collect_attrs, + patterns=patterns, + pubs=pubs, + raise_unmatched=raise_unmatched, + ranked=ranked, + repos=repos, + return_fmris=return_fmris, + variants=variants, + ) + + def __get_pkg_refcounts(self, pkg_cat, excludes, pubs): + pkg_ref = set() + pkg_optref = set() + + cat_info = frozenset([pkg_cat.DEPENDENCY]) + + for pfmri, actions in pkg_cat.actions( + cat_info, excludes=excludes, pubs=pubs + ): + for a in actions: + # Always keep packages with an install-hold + if ( + a.name == "set" + and a.attrs["name"] == "pkg.depend.install-hold" + ): + pkg_ref.add(pfmri.pkg_name) + continue + if a.name != "depend": + continue + for f in a.attrlist("fmri"): + tgt = fmri.PkgFmri(f) + if a.attrs["type"] in ["require", "conditional"]: + pkg_ref.add(tgt.pkg_name) + if a.attrs["type"] == "optional": + pkg_optref.add(tgt.pkg_name) + return pkg_ref, pkg_optref + + def __get_pkg_list( + self, + pkg_list, + cats=None, + collect_attrs=False, + inst_cat=None, + known_cat=None, + patterns=misc.EmptyI, + pubs=misc.EmptyI, + raise_unmatched=False, + ranked=False, + repos=None, + return_fmris=False, + return_metadata=False, + variants=False, + ): + """This is the implementation of get_pkg_list. The other + function is a wrapper that uses locking. The separation was + necessary because of API functions that already perform locking + but need to use get_pkg_list(). 
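Since get_pkg_list() is the main listing entry point, a short usage sketch may help. It unpacks the tuple shape documented above for installed packages and assumes an existing ImageInterface instance api_inst; LIST_INSTALLED and the ((pub, stem, version), summary, categories, states, attributes) layout are taken directly from the docstring, while list_installed is a hypothetical caller.

def list_installed(api_inst):
    # Yield one FMRI-style string per installed package, relying on the
    # documented ordering: stem, then publisher, then descending version.
    for (pub, stem, ver), summ, cats, states, attrs in api_inst.get_pkg_list(
        api_inst.LIST_INSTALLED
    ):
        yield "pkg://{0}/{1}@{2}".format(pub, stem, ver)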
This is a generator + function.""" + + installed = inst_newest = newest = upgradable = False + removable = False + if pkg_list == self.LIST_INSTALLED: + installed = True + elif pkg_list == self.LIST_INSTALLED_NEWEST: + inst_newest = True + elif pkg_list == self.LIST_NEWEST: + newest = True + elif pkg_list == self.LIST_UPGRADABLE: + upgradable = True + elif pkg_list == self.LIST_REMOVABLE: + removable = True + + # Each pattern in patterns can be a partial or full FMRI, so + # extract the individual components for use in filtering. + illegals = [] + pat_tuples = {} + pat_versioned = False + latest_pats = set() + seen = set() + npatterns = set() + for pat, error, pfmri, matcher in self.parse_fmri_patterns(patterns): + if error: + illegals.append(error) + continue + + # Duplicate patterns are ignored. + sfmri = str(pfmri) + if sfmri in seen: + # A different form of the same pattern + # was specified already; ignore this + # one (e.g. pkg:/network/ping, + # /network/ping). + continue + + # Track used patterns. + seen.add(sfmri) + npatterns.add(pat) + + if "@" in pat: + # Mark that a pattern contained version + # information. This is used for a listing + # optimization later on. + pat_versioned = True + if getattr(pfmri.version, "match_latest", None): + latest_pats.add(pat) + pat_tuples[pat] = (pfmri.tuple(), matcher) + + patterns = npatterns + del npatterns, seen + + if illegals: + raise apx.InventoryException(illegal=illegals) + + if repos: + ignored, ignored, known_cat, inst_cat = self.__get_alt_pkg_data( + repos + ) + + # For LIST_INSTALLED_NEWEST, installed packages need to be + # determined and incorporation and publisher relationships + # mapped. + if inst_newest: + ( + pub_ranks, + inc_stems, + inc_vers, + inst_stems, + ren_stems, + ren_inst_stems, + ) = self.__map_installed_newest(pubs, known_cat=known_cat) + else: + pub_ranks = ( + inc_stems + ) = ( + inc_vers + ) = inst_stems = ren_stems = ren_inst_stems = misc.EmptyDict + + if installed or upgradable or removable: + if inst_cat: + pkg_cat = inst_cat + else: + pkg_cat = self._img.get_catalog(self._img.IMG_CATALOG_INSTALLED) + + # Don't need to perform variant filtering if only + # listing installed packages. + variants = True + elif known_cat: + pkg_cat = known_cat + else: + pkg_cat = self._img.get_catalog(self._img.IMG_CATALOG_KNOWN) + + cat_info = frozenset([pkg_cat.DEPENDENCY, pkg_cat.SUMMARY]) + + # Keep track of when the newest version has been found for + # each incorporated stem. + slist = set() + + # Keep track of listed stems for all other packages on a + # per-publisher basis. + nlist = collections.defaultdict(int) + + def check_state(t, entry): + states = entry["metadata"]["states"] + pkgi = pkgdefs.PKG_STATE_INSTALLED in states + pkgu = pkgdefs.PKG_STATE_UPGRADABLE in states + pub, stem, ver = t + + if upgradable: + # If package is marked upgradable, return it. + return pkgu + elif removable: + return not stem in pkg_ref + elif pkgi: + # Nothing more to do here. + return True + elif stem in inst_stems: + # Some other version of this package is + # installed, so this one should not be + # returned. + return False - Results are always sorted by stem, publisher, and then in - descending version order. - - 'pkg_list' is one of the following constant values indicating - what base set of package data should be used for results: - - LIST_ALL - All known packages. - - LIST_INSTALLED - Installed packages. - - LIST_INSTALLED_NEWEST - Installed packages and the newest - versions of packages not installed. 
- Renamed packages that are listed in - an installed incorporation will be - excluded unless they are installed. - - LIST_NEWEST - The newest versions of all known packages - that match the provided patterns and - other criteria. - - LIST_UPGRADABLE - Packages that are installed and upgradable. - - LIST_REMOVABLE - Packages that have no dependants - - 'cats' is an optional list of package category tuples of the - form (scheme, cat) to restrict the results to. If a package - is assigned to any of the given categories, it will be - returned. A value of [] will return packages not assigned - to any package category. A value of None indicates that no - package category filtering should be applied. - - 'collect_attrs' is an optional boolean that indicates whether - all package attributes should be collected and returned in the - fifth element of the return tuple. If False, that element will - be an empty dictionary. - - 'patterns' is an optional list of FMRI wildcard strings to - filter results by. - - 'pubs' is an optional list of publisher prefixes to restrict - the results to. - - 'raise_unmatched' is an optional boolean value that indicates - whether an InventoryException should be raised if any patterns - (after applying all other filtering and returning all results) - didn't match any packages. - - 'ranked' is an optional boolean value that indicates whether - only the matching package versions from the highest-ranked - publisher should be returned. This option is ignored for - patterns that explicitly specify the publisher to match. - - 'repos' is a list of URI strings or RepositoryURI objects that - represent the locations of package repositories to list packages - for. - - 'return_fmris' is an optional boolean value that indicates that - an FMRI object should be returned in place of the (pub, stem, - ver) tuple that is normally returned. - - 'variants' is an optional boolean value that indicates that - packages that are for arch or zone variants not applicable to - this image should be returned. - - Please note that this function may invoke network operations - to retrieve the requested package information.""" - - return self.__get_pkg_list(pkg_list, cats=cats, - collect_attrs=collect_attrs, patterns=patterns, pubs=pubs, - raise_unmatched=raise_unmatched, ranked=ranked, repos=repos, - return_fmris=return_fmris, variants=variants) - - - def __get_pkg_refcounts(self, pkg_cat, excludes, pubs): - pkg_ref = set() - pkg_optref = set() - - cat_info = frozenset([pkg_cat.DEPENDENCY]) - - for pfmri, actions in pkg_cat.actions(cat_info, - excludes=excludes, pubs=pubs): - for a in actions: - # Always keep packages with an install-hold - if (a.name == 'set' and a.attrs['name'] == - 'pkg.depend.install-hold'): - pkg_ref.add(pfmri.pkg_name) - continue - if a.name != 'depend': continue - for f in a.attrlist('fmri'): - tgt = fmri.PkgFmri(f) - if a.attrs['type'] in [ - 'require', - 'conditional' - ]: - pkg_ref.add(tgt.pkg_name) - if a.attrs['type'] == 'optional': - pkg_optref.add(tgt.pkg_name) - return pkg_ref, pkg_optref - - def __get_pkg_list(self, pkg_list, cats=None, collect_attrs=False, - inst_cat=None, known_cat=None, patterns=misc.EmptyI, - pubs=misc.EmptyI, raise_unmatched=False, ranked=False, repos=None, - return_fmris=False, return_metadata=False, variants=False): - """This is the implementation of get_pkg_list. The other - function is a wrapper that uses locking. The separation was - necessary because of API functions that already perform locking - but need to use get_pkg_list(). 
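The __get_pkg_refcounts() helper above feeds LIST_REMOVABLE: a package is considered removable when no other package requires it, and packages that are only optionally depended upon merely gain the PKG_STATE_OPTIONAL flag. The toy sketch below illustrates just that core idea over pre-extracted dependency pairs; it deliberately ignores install-holds and the optional-dependency bookkeeping, and removable_stems plus its inputs are hypothetical names.

def removable_stems(installed, depend_pairs):
    # installed: iterable of package stems.
    # depend_pairs: iterable of (dependency-type, target-stem) tuples,
    # standing in for the catalog walk done by __get_pkg_refcounts().
    referenced = {
        stem for dtype, stem in depend_pairs
        if dtype in ("require", "conditional")
    }
    return [stem for stem in installed if stem not in referenced]

# removable_stems(["web/curl", "library/libxml2"],
#                 [("require", "library/libxml2")]) == ["web/curl"]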
This is a generator - function.""" - - installed = inst_newest = newest = upgradable = False - removable = False - if pkg_list == self.LIST_INSTALLED: - installed = True - elif pkg_list == self.LIST_INSTALLED_NEWEST: - inst_newest = True - elif pkg_list == self.LIST_NEWEST: - newest = True - elif pkg_list == self.LIST_UPGRADABLE: - upgradable = True - elif pkg_list == self.LIST_REMOVABLE: - removable = True - - # Each pattern in patterns can be a partial or full FMRI, so - # extract the individual components for use in filtering. - illegals = [] - pat_tuples = {} - pat_versioned = False - latest_pats = set() - seen = set() - npatterns = set() - for pat, error, pfmri, matcher in self.parse_fmri_patterns( - patterns): - if error: - illegals.append(error) - continue + # Attempt to determine if this package is installed + # under a different name or constrained under a + # different name. + tgt = ren_stems.get(stem, None) + while tgt is not None: + if tgt in inc_vers: + # Package is incorporated under a + # different name, so allow this + # to fallthrough to the incoporation + # evaluation. + break + elif tgt in inst_stems: + # Package is installed under a + # different name, so skip it. + return False + tgt = ren_stems.get(tgt, None) + + # Attempt to find a suitable version to return. + if stem in inc_vers: + # For package stems that are incorporated, only + # return the newest successor version based on + # publisher rank. + if stem in slist: + # Newest version already returned. + return False + + if stem in inc_stems and pub != inc_stems[stem]: + # This entry is for a lower-ranked + # publisher. + return False + + # XXX version should not require build release. + ever = pkg.version.Version(ver) + + # If the entry's version is a successor to + # the incorporated version, then this is the + # 'newest' version of this package since + # entries are processed in descending version + # order. + iver = inc_vers[stem] + if ever.is_successor(iver, pkg.version.CONSTRAINT_AUTO): + slist.add(stem) + return True + return False - # Duplicate patterns are ignored. - sfmri = str(pfmri) - if sfmri in seen: - # A different form of the same pattern - # was specified already; ignore this - # one (e.g. pkg:/network/ping, - # /network/ping). - continue + pkg_stem = "!".join((pub, stem)) + if pkg_stem in nlist: + # A newer version has already been listed for + # this stem and publisher. + return False + return True + + filter_cb = None + if inst_newest or upgradable or removable: + # Filtering needs to be applied. + filter_cb = check_state + + excludes = self._img.list_excludes() + img_variants = self._img.get_variants() + + if removable: + pkg_ref, pkg_optref = self.__get_pkg_refcounts( + pkg_cat, excludes, pubs + ) + + matched_pats = set() + pkg_matching_pats = None + + # Retrieve only the newest package versions for LIST_NEWEST if + # none of the patterns have version information and variants are + # included. (This cuts down on the number of entries that have + # to be filtered.) + use_last = newest and not pat_versioned and variants + + if ranked: + # If caller requested results to be ranked by publisher, + # then the list of publishers to return must be passed + # to entry_actions() in rank order. + pub_ranks = self._img.get_publisher_ranks() + if not pubs: + # It's important that the list of possible + # publishers is gleaned from the catalog + # directly and not image configuration so + # that temporary sources (archives, etc.) + # work as expected. 
+ pubs = pkg_cat.publishers() + for p in pubs: + pub_ranks.setdefault(p, (99, (p, False, False))) + + def pub_key(a): + return (pub_ranks[a], a) + + pubs = sorted(pubs, key=pub_key) + + # Too many nested blocks; + # pylint: disable=R0101 + ranked_stems = {} + for t, entry, actions in pkg_cat.entry_actions( + cat_info, + cb=filter_cb, + excludes=excludes, + last=use_last, + ordered=True, + pubs=pubs, + ): + pub, stem, ver = t + + omit_ver = False + omit_package = None + + pkg_stem = "!".join((pub, stem)) + if newest and pkg_stem in nlist: + # A newer version has already been listed, so + # any additional entries need to be marked for + # omission before continuing. + omit_package = True + elif ranked and not patterns and ranked_stems.get(stem, pub) != pub: + # A different version from a higher-ranked + # publisher has been returned already, so skip + # this one. This can only be done safely at + # this point if no patterns have been specified, + # since publisher-specific patterns override + # ranking behaviour. + omit_package = True + else: + nlist[pkg_stem] += 1 + + if raise_unmatched: + pkg_matching_pats = set() + if not omit_package: + ever = None + for pat in patterns: + (pat_pub, pat_stem, pat_ver), matcher = pat_tuples[pat] - # Track used patterns. - seen.add(sfmri) - npatterns.add(pat) - - if "@" in pat: - # Mark that a pattern contained version - # information. This is used for a listing - # optimization later on. - pat_versioned = True - if getattr(pfmri.version, "match_latest", None): - latest_pats.add(pat) - pat_tuples[pat] = (pfmri.tuple(), matcher) - - patterns = npatterns - del npatterns, seen - - if illegals: - raise apx.InventoryException(illegal=illegals) - - if repos: - ignored, ignored, known_cat, inst_cat = \ - self.__get_alt_pkg_data(repos) - - # For LIST_INSTALLED_NEWEST, installed packages need to be - # determined and incorporation and publisher relationships - # mapped. - if inst_newest: - pub_ranks, inc_stems, inc_vers, inst_stems, ren_stems, \ - ren_inst_stems = self.__map_installed_newest( - pubs, known_cat=known_cat) - else: - pub_ranks = inc_stems = inc_vers = inst_stems = \ - ren_stems = ren_inst_stems = misc.EmptyDict - - if installed or upgradable or removable: - if inst_cat: - pkg_cat = inst_cat - else: - pkg_cat = self._img.get_catalog( - self._img.IMG_CATALOG_INSTALLED) - - # Don't need to perform variant filtering if only - # listing installed packages. - variants = True - elif known_cat: - pkg_cat = known_cat - else: - pkg_cat = self._img.get_catalog( - self._img.IMG_CATALOG_KNOWN) - - cat_info = frozenset([pkg_cat.DEPENDENCY, pkg_cat.SUMMARY]) - - # Keep track of when the newest version has been found for - # each incorporated stem. - slist = set() - - # Keep track of listed stems for all other packages on a - # per-publisher basis. - nlist = collections.defaultdict(int) - - def check_state(t, entry): - states = entry["metadata"]["states"] - pkgi = pkgdefs.PKG_STATE_INSTALLED in states - pkgu = pkgdefs.PKG_STATE_UPGRADABLE in states - pub, stem, ver = t - - if upgradable: - # If package is marked upgradable, return it. - return pkgu - elif removable: - return not stem in pkg_ref - elif pkgi: - # Nothing more to do here. - return True - elif stem in inst_stems: - # Some other version of this package is - # installed, so this one should not be - # returned. - return False - - # Attempt to determine if this package is installed - # under a different name or constrained under a - # different name. 
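The ranked-listing branch at the top of this hunk orders publishers by their configured rank before walking catalog entries; prefixes known only from temporary sources get the sentinel rank (99, (p, False, False)) so they sort last. A small illustrative sketch of that ordering, with hypothetical publisher prefixes:

def rank_publishers(prefixes, pub_ranks):
    # pub_ranks maps prefix -> rank tuple, as returned by
    # get_publisher_ranks() above; unknown prefixes sink to the bottom.
    for p in prefixes:
        pub_ranks.setdefault(p, (99, (p, False, False)))
    return sorted(prefixes, key=lambda p: (pub_ranks[p], p))

# rank_publishers(["extra", "omnios"],
#                 {"omnios": (1, ("omnios", True, False))})
# -> ["omnios", "extra"]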
- tgt = ren_stems.get(stem, None) - while tgt is not None: - if tgt in inc_vers: - # Package is incorporated under a - # different name, so allow this - # to fallthrough to the incoporation - # evaluation. - break - elif tgt in inst_stems: - # Package is installed under a - # different name, so skip it. - return False - tgt = ren_stems.get(tgt, None) - - # Attempt to find a suitable version to return. - if stem in inc_vers: - # For package stems that are incorporated, only - # return the newest successor version based on - # publisher rank. - if stem in slist: - # Newest version already returned. - return False - - if stem in inc_stems and \ - pub != inc_stems[stem]: - # This entry is for a lower-ranked - # publisher. - return False - - # XXX version should not require build release. - ever = pkg.version.Version(ver) - - # If the entry's version is a successor to - # the incorporated version, then this is the - # 'newest' version of this package since - # entries are processed in descending version - # order. - iver = inc_vers[stem] - if ever.is_successor(iver, - pkg.version.CONSTRAINT_AUTO): - slist.add(stem) - return True - return False - - pkg_stem = "!".join((pub, stem)) - if pkg_stem in nlist: - # A newer version has already been listed for - # this stem and publisher. - return False - return True - - filter_cb = None - if inst_newest or upgradable or removable: - # Filtering needs to be applied. - filter_cb = check_state - - excludes = self._img.list_excludes() - img_variants = self._img.get_variants() - - if removable: - pkg_ref, pkg_optref = self.__get_pkg_refcounts( - pkg_cat, excludes, pubs) - - matched_pats = set() - pkg_matching_pats = None - - # Retrieve only the newest package versions for LIST_NEWEST if - # none of the patterns have version information and variants are - # included. (This cuts down on the number of entries that have - # to be filtered.) - use_last = newest and not pat_versioned and variants - - if ranked: - # If caller requested results to be ranked by publisher, - # then the list of publishers to return must be passed - # to entry_actions() in rank order. - pub_ranks = self._img.get_publisher_ranks() - if not pubs: - # It's important that the list of possible - # publishers is gleaned from the catalog - # directly and not image configuration so - # that temporary sources (archives, etc.) - # work as expected. - pubs = pkg_cat.publishers() - for p in pubs: - pub_ranks.setdefault(p, (99, (p, False, False))) - - def pub_key(a): - return (pub_ranks[a], a) - - pubs = sorted(pubs, key=pub_key) - - # Too many nested blocks; - # pylint: disable=R0101 - ranked_stems = {} - for t, entry, actions in pkg_cat.entry_actions(cat_info, - cb=filter_cb, excludes=excludes, last=use_last, - ordered=True, pubs=pubs): - pub, stem, ver = t - - omit_ver = False - omit_package = None - - pkg_stem = "!".join((pub, stem)) - if newest and pkg_stem in nlist: - # A newer version has already been listed, so - # any additional entries need to be marked for - # omission before continuing. + if pat_pub is not None and pub != pat_pub: + # Publisher doesn't match. + if omit_package is None: + omit_package = True + continue + elif ( + ranked + and not pat_pub + and ranked_stems.get(stem, pub) != pub + ): + # A different version from a + # higher-ranked publisher has + # been returned already, so skip + # this one since no publisher + # was specified for the pattern. 
+ if omit_package is None: + omit_package = True + continue + + if matcher == self.MATCH_EXACT: + if pat_stem != stem: + # Stem doesn't match. + if omit_package is None: omit_package = True - elif ranked and not patterns and \ - ranked_stems.get(stem, pub) != pub: - # A different version from a higher-ranked - # publisher has been returned already, so skip - # this one. This can only be done safely at - # this point if no patterns have been specified, - # since publisher-specific patterns override - # ranking behaviour. + continue + elif matcher == self.MATCH_FMRI: + if not ("/" + stem).endswith("/" + pat_stem): + # Stem doesn't match. + if omit_package is None: omit_package = True - else: - nlist[pkg_stem] += 1 - - if raise_unmatched: - pkg_matching_pats = set() - if not omit_package: - ever = None - for pat in patterns: - (pat_pub, pat_stem, pat_ver), matcher = \ - pat_tuples[pat] - - if pat_pub is not None and \ - pub != pat_pub: - # Publisher doesn't match. - if omit_package is None: - omit_package = True - continue - elif ranked and not pat_pub and \ - ranked_stems.get(stem, pub) != pub: - # A different version from a - # higher-ranked publisher has - # been returned already, so skip - # this one since no publisher - # was specified for the pattern. - if omit_package is None: - omit_package = True - continue - - if matcher == self.MATCH_EXACT: - if pat_stem != stem: - # Stem doesn't match. - if omit_package is None: - omit_package = \ - True - continue - elif matcher == self.MATCH_FMRI: - if not ("/" + stem).endswith( - "/" + pat_stem): - # Stem doesn't match. - if omit_package is None: - omit_package = \ - True - continue - elif matcher == self.MATCH_GLOB: - if not fnmatch.fnmatchcase(stem, - pat_stem): - # Stem doesn't match. - if omit_package is None: - omit_package = \ - True - continue - - if pat_ver is not None: - if ever is None: - # Avoid constructing a - # version object more - # than once for each - # entry. - ever = pkg.version.Version(ver) - if not ever.is_successor(pat_ver, - pkg.version.CONSTRAINT_AUTO): - if omit_package is None: - omit_package = \ - True - omit_ver = True - continue - - if pat in latest_pats and \ - nlist[pkg_stem] > 1: - # Package allowed by pattern, - # but isn't the "latest" - # version. - if omit_package is None: - omit_package = True - omit_ver = True - continue - - # If this entry matched at least one - # pattern, then ensure it is returned. - omit_package = False - if not raise_unmatched: - # It's faster to stop as soon - # as a match is found. - break - - # If caller has requested other match - # cases be raised as an exception, then - # all patterns must be tested for every - # entry. This is slower, so only done - # if necessary. - pkg_matching_pats.add(pat) - - if omit_package: - # Package didn't match critera; skip it. - if (filter_cb is not None or (newest and - pat_versioned)) and omit_ver and \ - nlist[pkg_stem] == 1: - # If omitting because of version, and - # no other versions have been returned - # yet for this stem, then discard - # tracking entry so that other - # versions will be listed. - del nlist[pkg_stem] - slist.discard(stem) - continue - - # Perform image arch and zone variant filtering so - # that only packages appropriate for this image are - # returned, but only do this for packages that are - # not installed. 
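The three matcher branches above (MATCH_EXACT, MATCH_FMRI, MATCH_GLOB) decide whether a catalog stem satisfies a pattern. A compact sketch of the same three tests, using plain strings in place of the MATCH_* constants (the constant names are kept as comments to make the mapping clear); stem_matches is a hypothetical helper.

import fnmatch

def stem_matches(stem, pat_stem, matcher):
    if matcher == "exact":        # MATCH_EXACT
        return pat_stem == stem
    if matcher == "fmri":         # MATCH_FMRI: trailing-component match
        return ("/" + stem).endswith("/" + pat_stem)
    if matcher == "glob":         # MATCH_GLOB
        return fnmatch.fnmatchcase(stem, pat_stem)
    raise ValueError(matcher)

# stem_matches("network/ping", "ping", "fmri") -> True
# stem_matches("network/ping", "network/*", "glob") -> True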
- pcats = [] - pkgr = False - unsupported = False - summ = None - targets = set() - - omit_var = False - mdata = entry["metadata"] - states = mdata["states"] - pkgi = pkgdefs.PKG_STATE_INSTALLED in states - ddm = lambda: collections.defaultdict(list) - attrs = collections.defaultdict(ddm) - try: - for a in actions: - if a.name == "depend" and \ - a.attrs["type"] == "require": - targets.add(a.attrs["fmri"]) - continue - if a.name != "set": - continue - - atname = a.attrs["name"] - atvalue = a.attrs["value"] - if collect_attrs: - atvlist = a.attrlist("value") - - # XXX Need to describe this data - # structure sanely somewhere. - mods = tuple( - (k, tuple(sorted(a.attrlist(k)))) - for k in sorted(six.iterkeys(a.attrs)) - if k not in ("name", "value") - ) - attrs[atname][mods].extend(atvlist) - - if atname == "pkg.summary": - summ = atvalue - continue - - if atname == "description": - if summ is None: - # Historical summary - # field. - summ = atvalue - # pylint: disable=W0106 - collect_attrs and \ - attrs["pkg.summary"] \ - [mods]. \ - extend(atvlist) - continue - - if atname == "info.classification": - pcats.extend( - a.parse_category_info()) - - if pkgi: - # No filtering for installed - # packages. - continue - - # Rename filtering should only be - # performed for incorporated packages - # at this point. - if atname == "pkg.renamed": - if stem in inc_vers: - pkgr = True - continue - - if variants or \ - not atname.startswith("variant."): - # No variant filtering required. - continue - - # For all variants explicitly set in the - # image, elide packages that are not for - # a matching variant value. - is_list = type(atvalue) == list - for vn, vv in six.iteritems(img_variants): - if vn == atname and \ - ((is_list and - vv not in atvalue) or \ - (not is_list and - vv != atvalue)): - omit_package = True - omit_var = True - break - except apx.InvalidPackageErrors: - # Ignore errors for packages that have invalid - # or unsupported metadata. This is necessary so - # that API consumers can discover new package - # data that may be needed to perform an upgrade - # so that the API can understand them. - states = set(states) - states.add(PackageInfo.UNSUPPORTED) - unsupported = True - - if not pkgi and pkgr and stem in inc_vers: - # If the package is not installed, but this is - # the terminal version entry for the stem and - # it is an incorporated package, then omit the - # package if it has been installed or is - # incorporated using one of the new names. - for e in targets: - tgt = e - while tgt is not None: - if tgt in ren_inst_stems or \ - tgt in inc_vers: - omit_package = True - break - tgt = ren_stems.get(tgt, None) - - if omit_package: - # Package didn't match criteria; skip it. - if (filter_cb is not None or newest) and \ - omit_var and nlist[pkg_stem] == 1: - # If omitting because of variant, and - # no other versions have been returned - # yet for this stem, then discard - # tracking entry so that other - # versions will be listed. - del nlist[pkg_stem] - slist.discard(stem) - continue - - if cats is not None: - if not cats: - if pcats: - # Only want packages with no - # categories. - continue - elif not [sc for sc in cats if sc in pcats]: - # Package doesn't match specified - # category criteria. - continue - - if removable and stem in pkg_optref: - states.append(pkgdefs.PKG_STATE_OPTIONAL) - - # Return the requested package data. - if not unsupported: - # Prevent modification of state data. 
- states = frozenset(states) - - if raise_unmatched: - # Only after all other filtering has been - # applied are the patterns that the package - # matched considered "matching". - matched_pats.update(pkg_matching_pats) - if ranked: - # Only after all other filtering has been - # applied is the stem considered to have been - # a "ranked" match. - ranked_stems.setdefault(stem, pub) - - if return_fmris: - pfmri = fmri.PkgFmri(name=stem, publisher=pub, - version=ver) - if return_metadata: - yield (pfmri, summ, pcats, states, - attrs, mdata) - else: - yield (pfmri, summ, pcats, states, - attrs) - else: - if return_metadata: - yield (t, summ, pcats, states, - attrs, mdata) - else: - yield (t, summ, pcats, states, - attrs) - - if raise_unmatched: - # Caller has requested that non-matching patterns or - # patterns that match multiple packages cause an - # exception to be raised. - notfound = set(pat_tuples.keys()) - matched_pats - if raise_unmatched and notfound: - raise apx.InventoryException(notfound=notfound) - - @_LockedCancelable() - def info(self, fmri_strings, local, info_needed, ranked=False, - repos=None): - """Gathers information about fmris. fmri_strings is a list - of fmri_names for which information is desired. local - determines whether to retrieve the information locally - (if possible). It returns a dictionary of lists. The keys - for the dictionary are the constants specified in the class - definition. The values are lists of PackageInfo objects or - strings. - - 'ranked' is an optional boolean value that indicates whether - only the matching package versions from the highest-ranked - publisher should be returned. This option is ignored for - patterns that explicitly specify the publisher to match. - - 'repos' is a list of URI strings or RepositoryURI objects that - represent the locations of packages to return information for. - """ + continue + elif matcher == self.MATCH_GLOB: + if not fnmatch.fnmatchcase(stem, pat_stem): + # Stem doesn't match. + if omit_package is None: + omit_package = True + continue + + if pat_ver is not None: + if ever is None: + # Avoid constructing a + # version object more + # than once for each + # entry. + ever = pkg.version.Version(ver) + if not ever.is_successor( + pat_ver, pkg.version.CONSTRAINT_AUTO + ): + if omit_package is None: + omit_package = True + omit_ver = True + continue + + if pat in latest_pats and nlist[pkg_stem] > 1: + # Package allowed by pattern, + # but isn't the "latest" + # version. + if omit_package is None: + omit_package = True + omit_ver = True + continue - bad_opts = info_needed - PackageInfo.ALL_OPTIONS - if bad_opts: - raise apx.UnrecognizedOptionsToInfo(bad_opts) + # If this entry matched at least one + # pattern, then ensure it is returned. + omit_package = False + if not raise_unmatched: + # It's faster to stop as soon + # as a match is found. + break + + # If caller has requested other match + # cases be raised as an exception, then + # all patterns must be tested for every + # entry. This is slower, so only done + # if necessary. + pkg_matching_pats.add(pat) + + if omit_package: + # Package didn't match critera; skip it. + if ( + (filter_cb is not None or (newest and pat_versioned)) + and omit_ver + and nlist[pkg_stem] == 1 + ): + # If omitting because of version, and + # no other versions have been returned + # yet for this stem, then discard + # tracking entry so that other + # versions will be listed. 
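The info() docstring above describes a result dictionary keyed by INFO_FOUND / INFO_MISSING / INFO_ILLEGALS (the keys appear a little further down in this hunk). A minimal usage sketch, assuming an existing ImageInterface instance api_inst and that PackageInfo is importable from pkg.client.api; installed_package_info is a hypothetical caller.

from pkg.client.api import PackageInfo

def installed_package_info(api_inst, names):
    # local=True restricts the lookup to installed packages, per the
    # docstring above; only identity and size data are requested here.
    rval = api_inst.info(
        names, True, frozenset([PackageInfo.IDENTITY, PackageInfo.SIZE]))
    return rval[api_inst.INFO_FOUND]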
+ del nlist[pkg_stem] + slist.discard(stem) + continue + + # Perform image arch and zone variant filtering so + # that only packages appropriate for this image are + # returned, but only do this for packages that are + # not installed. + pcats = [] + pkgr = False + unsupported = False + summ = None + targets = set() + + omit_var = False + mdata = entry["metadata"] + states = mdata["states"] + pkgi = pkgdefs.PKG_STATE_INSTALLED in states + ddm = lambda: collections.defaultdict(list) + attrs = collections.defaultdict(ddm) + try: + for a in actions: + if a.name == "depend" and a.attrs["type"] == "require": + targets.add(a.attrs["fmri"]) + continue + if a.name != "set": + continue - self.log_operation_start("info") + atname = a.attrs["name"] + atvalue = a.attrs["value"] + if collect_attrs: + atvlist = a.attrlist("value") + + # XXX Need to describe this data + # structure sanely somewhere. + mods = tuple( + (k, tuple(sorted(a.attrlist(k)))) + for k in sorted(six.iterkeys(a.attrs)) + if k not in ("name", "value") + ) + attrs[atname][mods].extend(atvlist) - # Common logic for image and temp repos case. - if local: - ilist = self.LIST_INSTALLED - else: - # Verify validity of certificates before attempting - # network operations. - self.__cert_verify(log_op_end=[apx.CertificateError]) - ilist = self.LIST_NEWEST - - # The pkg_pub_map is only populated when temp repos are - # specified and maps packages to the repositories that - # contain them for manifest retrieval. - pkg_pub_map = None - known_cat = None - inst_cat = None - if repos: - pkg_pub_map, ignored, known_cat, inst_cat = \ - self.__get_alt_pkg_data(repos) - if local: - pkg_cat = inst_cat - else: - pkg_cat = known_cat - elif local: - pkg_cat = self._img.get_catalog( - self._img.IMG_CATALOG_INSTALLED) - if not fmri_strings and pkg_cat.package_count == 0: - self.log_operation_end( - result=RESULT_NOTHING_TO_DO) - raise apx.NoPackagesInstalledException() - else: - pkg_cat = self._img.get_catalog( - self._img.IMG_CATALOG_KNOWN) + if atname == "pkg.summary": + summ = atvalue + continue - excludes = self._img.list_excludes() + if atname == "description": + if summ is None: + # Historical summary + # field. + summ = atvalue + # pylint: disable=W0106 + collect_attrs and attrs["pkg.summary"][mods].extend( + atvlist + ) + continue - # Set of options that can use catalog data. - cat_opts = frozenset([PackageInfo.DESCRIPTION, - PackageInfo.DEPENDENCIES]) + if atname == "info.classification": + pcats.extend(a.parse_category_info()) - # Set of options that require manifest retrieval. - act_opts = PackageInfo.ACTION_OPTIONS - \ - frozenset([PackageInfo.DEPENDENCIES]) + if pkgi: + # No filtering for installed + # packages. + continue - collect_attrs = PackageInfo.ALL_ATTRIBUTES in info_needed + # Rename filtering should only be + # performed for incorporated packages + # at this point. + if atname == "pkg.renamed": + if stem in inc_vers: + pkgr = True + continue - pis = [] - rval = { - self.INFO_FOUND: pis, - self.INFO_MISSING: misc.EmptyI, - self.INFO_ILLEGALS: misc.EmptyI, - } + if variants or not atname.startswith("variant."): + # No variant filtering required. 
+ continue - try: - for pfmri, summary, cats, states, attrs, mdata in \ - self.__get_pkg_list( - ilist, collect_attrs=collect_attrs, - inst_cat=inst_cat, known_cat=known_cat, - patterns=fmri_strings, raise_unmatched=True, - ranked=ranked, return_fmris=True, - return_metadata=True, variants=True): - release = build_release = branch = \ - packaging_date = None - - pub, name, version = pfmri.tuple() - alt_pub = None - if pkg_pub_map: - alt_pub = \ - pkg_pub_map[pub][name][str(version)] - - if PackageInfo.IDENTITY in info_needed: - release = version.release - build_release = version.build_release - branch = version.branch - packaging_date = \ - version.get_timestamp().strftime( - "%c") - else: - pub = name = version = None - - links = hardlinks = files = dirs = \ - csize = size = licenses = cat_info = \ - description = None - - if PackageInfo.CATEGORIES in info_needed: - cat_info = [ - PackageCategory(scheme, cat) - for scheme, cat in cats - ] - - ret_cat_data = cat_opts & info_needed - dependencies = None - unsupported = False - if ret_cat_data: - try: - ignored, description, ignored, \ - dependencies = \ - _get_pkg_cat_data(pkg_cat, - ret_cat_data, - excludes=excludes, - pfmri=pfmri) - except apx.InvalidPackageErrors: - # If the information can't be - # retrieved because the manifest - # can't be parsed, mark it and - # continue. - unsupported = True - - if dependencies is None: - dependencies = misc.EmptyI - - mfst = None - if not unsupported and \ - (frozenset([PackageInfo.SIZE, - PackageInfo.LICENSES]) | act_opts) & \ - info_needed: - try: - mfst = self._img.get_manifest( - pfmri, alt_pub=alt_pub) - except apx.InvalidPackageErrors: - # If the information can't be - # retrieved because the manifest - # can't be parsed, mark it and - # continue. - unsupported = True - - if mfst is not None: - if PackageInfo.LICENSES in info_needed: - licenses = self.__licenses(pfmri, - mfst, alt_pub=alt_pub) - - if PackageInfo.SIZE in info_needed: - size, csize = mfst.get_size( - excludes=excludes) - - if act_opts & info_needed: - if PackageInfo.LINKS in info_needed: - links = list( - mfst.gen_key_attribute_value_by_type( - "link", excludes)) - if PackageInfo.HARDLINKS in info_needed: - hardlinks = list( - mfst.gen_key_attribute_value_by_type( - "hardlink", excludes)) - if PackageInfo.FILES in info_needed: - files = list( - mfst.gen_key_attribute_value_by_type( - "file", excludes)) - if PackageInfo.DIRS in info_needed: - dirs = list( - mfst.gen_key_attribute_value_by_type( - "dir", excludes)) - elif PackageInfo.SIZE in info_needed: - size = csize = 0 - - # Trim response set. - last_install = None - last_update = None - if PackageInfo.STATE in info_needed: - if unsupported is True and \ - PackageInfo.UNSUPPORTED not in states: - # Mark package as - # unsupported so that - # caller can decide - # what to do. 
- states = set(states) - states.add( - PackageInfo.UNSUPPORTED) - - if "last-update" in mdata: - last_update = catalog.basic_ts_to_datetime( - mdata["last-update"]).strftime("%c") - if "last-install" in mdata: - last_install = catalog.basic_ts_to_datetime( - mdata["last-install"]).strftime("%c") - else: - states = misc.EmptyI - - if PackageInfo.CATEGORIES not in info_needed: - cats = None - if PackageInfo.SUMMARY in info_needed: - if summary is None: - summary = "" - else: - summary = None - - pis.append(PackageInfo(pkg_stem=name, - summary=summary, category_info_list=cat_info, - states=states, publisher=pub, version=release, - build_release=build_release, branch=branch, - packaging_date=packaging_date, size=size, - csize=csize, pfmri=pfmri, licenses=licenses, - links=links, hardlinks=hardlinks, files=files, - dirs=dirs, dependencies=dependencies, - description=description, attrs=attrs, - last_update=last_update, - last_install=last_install)) - except apx.InventoryException as e: - if e.illegal: - self.log_operation_end( - result=RESULT_FAILED_BAD_REQUEST) - rval[self.INFO_MISSING] = e.notfound - rval[self.INFO_ILLEGALS] = e.illegal + # For all variants explicitly set in the + # image, elide packages that are not for + # a matching variant value. + is_list = type(atvalue) == list + for vn, vv in six.iteritems(img_variants): + if vn == atname and ( + (is_list and vv not in atvalue) + or (not is_list and vv != atvalue) + ): + omit_package = True + omit_var = True + break + except apx.InvalidPackageErrors: + # Ignore errors for packages that have invalid + # or unsupported metadata. This is necessary so + # that API consumers can discover new package + # data that may be needed to perform an upgrade + # so that the API can understand them. + states = set(states) + states.add(PackageInfo.UNSUPPORTED) + unsupported = True + + if not pkgi and pkgr and stem in inc_vers: + # If the package is not installed, but this is + # the terminal version entry for the stem and + # it is an incorporated package, then omit the + # package if it has been installed or is + # incorporated using one of the new names. + for e in targets: + tgt = e + while tgt is not None: + if tgt in ren_inst_stems or tgt in inc_vers: + omit_package = True + break + tgt = ren_stems.get(tgt, None) + + if omit_package: + # Package didn't match criteria; skip it. + if ( + (filter_cb is not None or newest) + and omit_var + and nlist[pkg_stem] == 1 + ): + # If omitting because of variant, and + # no other versions have been returned + # yet for this stem, then discard + # tracking entry so that other + # versions will be listed. + del nlist[pkg_stem] + slist.discard(stem) + continue + + if cats is not None: + if not cats: + if pcats: + # Only want packages with no + # categories. + continue + elif not [sc for sc in cats if sc in pcats]: + # Package doesn't match specified + # category criteria. + continue + + if removable and stem in pkg_optref: + states.append(pkgdefs.PKG_STATE_OPTIONAL) + + # Return the requested package data. + if not unsupported: + # Prevent modification of state data. + states = frozenset(states) + + if raise_unmatched: + # Only after all other filtering has been + # applied are the patterns that the package + # matched considered "matching". + matched_pats.update(pkg_matching_pats) + if ranked: + # Only after all other filtering has been + # applied is the stem considered to have been + # a "ranked" match. 
+ ranked_stems.setdefault(stem, pub) + + if return_fmris: + pfmri = fmri.PkgFmri(name=stem, publisher=pub, version=ver) + if return_metadata: + yield (pfmri, summ, pcats, states, attrs, mdata) else: - if pis: - self.log_operation_end() - else: - self.log_operation_end( - result=RESULT_NOTHING_TO_DO) - return rval - - def can_be_canceled(self): - """Returns true if the API is in a cancelable state.""" - return self.__can_be_canceled - - def _disable_cancel(self): - """Sets_can_be_canceled to False in a way that prevents missed - wakeups. This may raise CanceledException, if a - cancellation is pending.""" - - self.__cancel_lock.acquire() - if self.__canceling: - self.__cancel_lock.release() - self._img.transport.reset() - raise apx.CanceledException() + yield (pfmri, summ, pcats, states, attrs) + else: + if return_metadata: + yield (t, summ, pcats, states, attrs, mdata) else: - self.__set_can_be_canceled(False) - self.__cancel_lock.release() - - def _enable_cancel(self): - """Sets can_be_canceled to True while grabbing the cancel - locks. The caller must still hold the activity lock while - calling this function.""" - - self.__cancel_lock.acquire() - self.__set_can_be_canceled(True) - self.__cancel_lock.release() - - def __set_can_be_canceled(self, status): - """Private method. Handles the details of changing the - cancelable state.""" - assert self.__cancel_lock._is_owned() - - # If caller requests a change to current state there is - # nothing to do. - if self.__can_be_canceled == status: - return - - if status: - # Callers must hold activity lock for operations - # that they will make cancelable. - assert self._activity_lock._is_owned() - # In any situation where the caller holds the activity - # lock and wants to set cancelable to true, a cancel - # should not already be in progress. This is because - # it should not be possible to invoke cancel until - # this routine has finished. Assert that we're not - # canceling. - assert not self.__canceling - - self.__can_be_canceled = status - if self.__cancel_state_callable: - self.__cancel_state_callable(self.__can_be_canceled) - - def reset(self): - """Resets the API back the initial state. Note: - this does not necessarily return the disk to its initial state - since the indexes or download cache may have been changed by - the prepare method.""" - self._acquire_activity_lock() - self.__reset_unlock() - self._activity_lock.release() - - def __reset_unlock(self): - """Private method. Provides a way to reset without taking the - activity lock. Should only be called by a thread which already - holds the activity lock.""" - - assert self._activity_lock._is_owned() - - # This needs to be done first so that find_root can use it. - self.__progresstracker.reset() - - # Ensure alternate sources are always cleared in an - # exception scenario. - self.__set_img_alt_sources(None) - self.__alt_sources = {} - - self._img.cleanup_downloads() - # Cache transport statistics about problematic repo sources - repo_status = self._img.transport.repo_status - self._img.transport.shutdown() - - # Recreate the image object using the path the api - # object was created with instead of the current path. 
- self._img = image.Image(self._img_path, - progtrack=self.__progresstracker, - user_provided_dir=True, - cmdpath=self.cmdpath) - self._img.blocking_locks = self.__blocking_locks - - self._img.transport.repo_status = repo_status - - lin = None - if self._img.linked.ischild(): - lin = self._img.linked.child_name - self.__progresstracker.set_linked_name(lin) - - self.__plan_desc = None - self.__planned_children = False - self.__plan_type = None - self.__prepared = False - self.__executed = False - self.__be_name = None - - self._cancel_cleanup_exception() - - def __check_cancel(self): - """Private method. Provides a callback method for internal - code to use to determine whether the current action has been - canceled.""" - return self.__canceling - - def _cancel_cleanup_exception(self): - """A private method that is called from exception handlers. - This is not needed if the method calls reset unlock, - which will call this method too. This catches the case - where a caller might have called cancel and gone to sleep, - but the requested operation failed with an exception before - it could raise a CanceledException.""" - - self.__cancel_lock.acquire() - self.__set_can_be_canceled(False) - self.__canceling = False - # Wake up any threads that are waiting on this aborted - # operation. - self.__cancel_cv.notify_all() - self.__cancel_lock.release() - - def _cancel_done(self): - """A private method that wakes any threads that have been - sleeping, waiting for a cancellation to finish.""" - - self.__cancel_lock.acquire() - if self.__canceling: - self.__canceling = False - self.__cancel_cv.notify_all() - self.__cancel_lock.release() - - def cancel(self): - """Used for asynchronous cancelation. It returns the API - to the state it was in prior to the current method being - invoked. Canceling during a plan phase returns the API to - its initial state. Canceling during prepare puts the API - into the state it was in just after planning had completed. - Plan execution cannot be canceled. A call to this method blocks - until the cancellation has happened. Note: this does not - necessarily return the disk to its initial state since the - indexes or download cache may have been changed by the - prepare method.""" - - self.__cancel_lock.acquire() - - if not self.__can_be_canceled: - self.__cancel_lock.release() - return False - - self.__set_can_be_canceled(False) - self.__canceling = True - # Wait until the cancelled operation wakes us up. - self.__cancel_cv.wait() - self.__cancel_lock.release() - return True + yield (t, summ, pcats, states, attrs) + + if raise_unmatched: + # Caller has requested that non-matching patterns or + # patterns that match multiple packages cause an + # exception to be raised. + notfound = set(pat_tuples.keys()) - matched_pats + if raise_unmatched and notfound: + raise apx.InventoryException(notfound=notfound) + + @_LockedCancelable() + def info(self, fmri_strings, local, info_needed, ranked=False, repos=None): + """Gathers information about fmris. fmri_strings is a list + of fmri_names for which information is desired. local + determines whether to retrieve the information locally + (if possible). It returns a dictionary of lists. The keys + for the dictionary are the constants specified in the class + definition. The values are lists of PackageInfo objects or + strings. + + 'ranked' is an optional boolean value that indicates whether + only the matching package versions from the highest-ranked + publisher should be returned. 
This option is ignored for + patterns that explicitly specify the publisher to match. + + 'repos' is a list of URI strings or RepositoryURI objects that + represent the locations of packages to return information for. + """ - def clear_history(self): - """Discard history information about in-progress operations.""" - self._img.history.clear() + bad_opts = info_needed - PackageInfo.ALL_OPTIONS + if bad_opts: + raise apx.UnrecognizedOptionsToInfo(bad_opts) + + self.log_operation_start("info") + + # Common logic for image and temp repos case. + if local: + ilist = self.LIST_INSTALLED + else: + # Verify validity of certificates before attempting + # network operations. + self.__cert_verify(log_op_end=[apx.CertificateError]) + ilist = self.LIST_NEWEST + + # The pkg_pub_map is only populated when temp repos are + # specified and maps packages to the repositories that + # contain them for manifest retrieval. + pkg_pub_map = None + known_cat = None + inst_cat = None + if repos: + pkg_pub_map, ignored, known_cat, inst_cat = self.__get_alt_pkg_data( + repos + ) + if local: + pkg_cat = inst_cat + else: + pkg_cat = known_cat + elif local: + pkg_cat = self._img.get_catalog(self._img.IMG_CATALOG_INSTALLED) + if not fmri_strings and pkg_cat.package_count == 0: + self.log_operation_end(result=RESULT_NOTHING_TO_DO) + raise apx.NoPackagesInstalledException() + else: + pkg_cat = self._img.get_catalog(self._img.IMG_CATALOG_KNOWN) + + excludes = self._img.list_excludes() + + # Set of options that can use catalog data. + cat_opts = frozenset( + [PackageInfo.DESCRIPTION, PackageInfo.DEPENDENCIES] + ) + + # Set of options that require manifest retrieval. + act_opts = PackageInfo.ACTION_OPTIONS - frozenset( + [PackageInfo.DEPENDENCIES] + ) + + collect_attrs = PackageInfo.ALL_ATTRIBUTES in info_needed + + pis = [] + rval = { + self.INFO_FOUND: pis, + self.INFO_MISSING: misc.EmptyI, + self.INFO_ILLEGALS: misc.EmptyI, + } - def __set_history_PlanCreationException(self, e): - if e.unmatched_fmris or e.multiple_matches or \ - e.missing_matches or e.illegal: - self.log_operation_end(error=e, - result=RESULT_FAILED_BAD_REQUEST) + try: + for ( + pfmri, + summary, + cats, + states, + attrs, + mdata, + ) in self.__get_pkg_list( + ilist, + collect_attrs=collect_attrs, + inst_cat=inst_cat, + known_cat=known_cat, + patterns=fmri_strings, + raise_unmatched=True, + ranked=ranked, + return_fmris=True, + return_metadata=True, + variants=True, + ): + release = build_release = branch = packaging_date = None + + pub, name, version = pfmri.tuple() + alt_pub = None + if pkg_pub_map: + alt_pub = pkg_pub_map[pub][name][str(version)] + + if PackageInfo.IDENTITY in info_needed: + release = version.release + build_release = version.build_release + branch = version.branch + packaging_date = version.get_timestamp().strftime("%c") else: - self.log_operation_end(error=e) - - @_LockedGenerator() - def local_search(self, query_lst): - """local_search takes a list of Query objects and performs - each query against the installed packages of the image.""" - - l = query_p.QueryLexer() - l.build() - qp = query_p.QueryParser(l) - ssu = None - for i, q in enumerate(query_lst): - try: - query = qp.parse(q.text) - query_rr = qp.parse(q.text) - if query_rr.remove_root(self._img.root): - query.add_or(query_rr) - if q.return_type == \ - query_p.Query.RETURN_PACKAGES: - query.propagate_pkg_return() - except query_p.BooleanQueryException as e: - raise apx.BooleanQueryException(e) - except query_p.ParseError as e: - raise apx.ParseError(e) - 
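# --- Editor's note (illustrative sketch, not part of this diff) ---
# The reformatted info() above is driven by a set of PackageInfo option
# constants and returns a dict keyed by INFO_FOUND / INFO_MISSING /
# INFO_ILLEGALS.  A minimal, hypothetical usage sketch follows; "api" stands
# for an already-constructed client API object exposing info(), and the FMRI
# string is a placeholder.  PackageInfo and its constants are the ones
# referenced in the code above.
info_needed = frozenset(
    [PackageInfo.SUMMARY, PackageInfo.STATE, PackageInfo.SIZE]
)
ret = api.info(["pkg:/example/package"], local=True, info_needed=info_needed)
for pi in ret[api.INFO_FOUND]:
    # Attribute names assumed to mirror the PackageInfo keyword arguments
    # used when the objects are constructed above.
    print(pi.pkg_stem, pi.summary, pi.size)
for missing in ret[api.INFO_MISSING]:
    print("no match:", missing)
# --- End editor's note ---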
self._img.update_index_dir() - assert self._img.index_dir - try: - query.set_info(num_to_return=q.num_to_return, - start_point=q.start_point, - index_dir=self._img.index_dir, - get_manifest_path=\ - self._img.get_manifest_path, - gen_installed_pkg_names=\ - self._img.gen_installed_pkg_names, - case_sensitive=q.case_sensitive) - res = query.search( - self._img.gen_installed_pkgs, - self._img.get_manifest_path, - self._img.list_excludes()) - except search_errors.InconsistentIndexException as e: - raise apx.InconsistentIndexException(e) - # i is being inserted to track which query the results - # are for. None is being inserted since there is no - # publisher being searched against. - try: - for r in res: - yield i, None, r - except apx.SlowSearchUsed as e: - ssu = e - if ssu: - raise ssu - - @staticmethod - def __parse_v_0(line, pub, v): - """This function parses the string returned by a version 0 - search server and puts it into the expected format of - (query_number, publisher, (version, return_type, (results))). - - "query_number" in the return value is fixed at 0 since search - v0 servers cannot accept multiple queries in a single HTTP - request.""" - - line = line.strip() - fields = line.split(None, 3) - return (0, pub, (v, Query.RETURN_ACTIONS, (fields[:4]))) - - @staticmethod - def __parse_v_1(line, pub, v): - """This function parses the string returned by a version 1 - search server and puts it into the expected format of - (query_number, publisher, (version, return_type, (results))) - If it receives a line it can't parse, it raises a - ServerReturnError.""" - - fields = line.split(None, 2) - if len(fields) != 3: - raise apx.ServerReturnError(line) - try: - return_type = int(fields[1]) - query_num = int(fields[0]) - except ValueError: - raise apx.ServerReturnError(line) - if return_type == Query.RETURN_ACTIONS: - subfields = fields[2].split(None, 2) - pfmri = fmri.PkgFmri(subfields[0]) - return pfmri, (query_num, pub, (v, return_type, - (pfmri, unquote(subfields[1]), - subfields[2]))) - elif return_type == Query.RETURN_PACKAGES: - pfmri = fmri.PkgFmri(fields[2]) - return pfmri, (query_num, pub, (v, return_type, pfmri)) + pub = name = version = None + + links = ( + hardlinks + ) = ( + files + ) = ( + dirs + ) = csize = size = licenses = cat_info = description = None + + if PackageInfo.CATEGORIES in info_needed: + cat_info = [ + PackageCategory(scheme, cat) for scheme, cat in cats + ] + + ret_cat_data = cat_opts & info_needed + dependencies = None + unsupported = False + if ret_cat_data: + try: + ( + ignored, + description, + ignored, + dependencies, + ) = _get_pkg_cat_data( + pkg_cat, + ret_cat_data, + excludes=excludes, + pfmri=pfmri, + ) + except apx.InvalidPackageErrors: + # If the information can't be + # retrieved because the manifest + # can't be parsed, mark it and + # continue. + unsupported = True + + if dependencies is None: + dependencies = misc.EmptyI + + mfst = None + if ( + not unsupported + and ( + frozenset([PackageInfo.SIZE, PackageInfo.LICENSES]) + | act_opts + ) + & info_needed + ): + try: + mfst = self._img.get_manifest(pfmri, alt_pub=alt_pub) + except apx.InvalidPackageErrors: + # If the information can't be + # retrieved because the manifest + # can't be parsed, mark it and + # continue. 
+ unsupported = True + + if mfst is not None: + if PackageInfo.LICENSES in info_needed: + licenses = self.__licenses(pfmri, mfst, alt_pub=alt_pub) + + if PackageInfo.SIZE in info_needed: + size, csize = mfst.get_size(excludes=excludes) + + if act_opts & info_needed: + if PackageInfo.LINKS in info_needed: + links = list( + mfst.gen_key_attribute_value_by_type( + "link", excludes + ) + ) + if PackageInfo.HARDLINKS in info_needed: + hardlinks = list( + mfst.gen_key_attribute_value_by_type( + "hardlink", excludes + ) + ) + if PackageInfo.FILES in info_needed: + files = list( + mfst.gen_key_attribute_value_by_type( + "file", excludes + ) + ) + if PackageInfo.DIRS in info_needed: + dirs = list( + mfst.gen_key_attribute_value_by_type( + "dir", excludes + ) + ) + elif PackageInfo.SIZE in info_needed: + size = csize = 0 + + # Trim response set. + last_install = None + last_update = None + if PackageInfo.STATE in info_needed: + if ( + unsupported is True + and PackageInfo.UNSUPPORTED not in states + ): + # Mark package as + # unsupported so that + # caller can decide + # what to do. + states = set(states) + states.add(PackageInfo.UNSUPPORTED) + + if "last-update" in mdata: + last_update = catalog.basic_ts_to_datetime( + mdata["last-update"] + ).strftime("%c") + if "last-install" in mdata: + last_install = catalog.basic_ts_to_datetime( + mdata["last-install"] + ).strftime("%c") else: - raise apx.ServerReturnError(line) - - @_LockedGenerator() - def remote_search(self, query_str_and_args_lst, servers=None, - prune_versions=True): - """This function takes a list of Query objects, and optionally - a list of servers to search against. It performs each query - against each server and yields the results in turn. If no - servers are provided, the search is conducted against all - active servers known by the image. - - The servers argument is a list of servers in two possible - forms: the old deprecated form of a publisher, in a - dictionary, or a Publisher object. - - A call to this function returns a generator that holds - API locks. Callers must either iterate through all of the - results, or call close() on the resulting object. Otherwise - it is possible to get deadlocks or NRLock reentrance - exceptions.""" - - failed = [] - invalid = [] - unsupported = [] - - if not servers: - servers = self._img.gen_publishers() - - new_qs = [] - l = query_p.QueryLexer() - l.build() - qp = query_p.QueryParser(l) - for q in query_str_and_args_lst: - try: - query = qp.parse(q.text) - query_rr = qp.parse(q.text) - if query_rr.remove_root(self._img.root): - query.add_or(query_rr) - if q.return_type == \ - query_p.Query.RETURN_PACKAGES: - query.propagate_pkg_return() - new_qs.append(query_p.Query(str(query), - q.case_sensitive, q.return_type, - q.num_to_return, q.start_point)) - except query_p.BooleanQueryException as e: - raise apx.BooleanQueryException(e) - except query_p.ParseError as e: - raise apx.ParseError(e) - - query_str_and_args_lst = new_qs - - incorp_info, inst_stems = self.get_incorp_info() - - slist = [] - for entry in servers: - if isinstance(entry, dict): - origin = entry["origin"] - try: - pub = self._img.get_publisher( - origin=origin) - pub_uri = publisher.RepositoryURI( - origin) - repo = publisher.Repository( - origins=[pub_uri]) - except apx.UnknownPublisher: - pub = publisher.RepositoryURI(origin) - repo = publisher.Repository( - origins=[pub]) - slist.append((pub, repo, origin)) - continue - - # Must be a publisher object. 
- osets = entry.get_origin_sets() - if not osets: - continue - for repo in osets: - slist.append((entry, repo, entry.prefix)) - - for pub, alt_repo, descriptive_name in slist: - if self.__canceling: - raise apx.CanceledException() - - try: - res = self._img.transport.do_search(pub, - query_str_and_args_lst, - ccancel=self.__check_cancel, - alt_repo=alt_repo) - except apx.CanceledException: - raise - except apx.NegativeSearchResult: - continue - except (apx.InvalidDepotResponseException, - apx.TransportError) as e: - # Alternate source failed portal test or can't - # be contacted at all. - failed.append((descriptive_name, e)) - continue - except apx.UnsupportedSearchError as e: - unsupported.append((descriptive_name, e)) - continue - except apx.MalformedSearchRequest as e: - ex = self._validate_search( - query_str_and_args_lst) - if ex: - raise ex - failed.append((descriptive_name, e)) - continue - - try: - if not self.validate_response(res, 1): - invalid.append(descriptive_name) - continue - for line in res: - pfmri, ret = self.__parse_v_1(line, pub, - 1) - pstem = pfmri.pkg_name - pver = pfmri.version - # Skip this package if a newer version - # is already installed and version - # pruning is enabled. - if prune_versions and \ - pstem in inst_stems and \ - pver < inst_stems[pstem]: - continue - # Return this result if version pruning - # is disabled, the package is not - # incorporated, or the version of the - # package matches the incorporation. - if not prune_versions or \ - pstem not in incorp_info or \ - pfmri.version.is_successor( - incorp_info[pstem], - pkg.version.CONSTRAINT_AUTO): - yield ret - - except apx.CanceledException: - raise - except apx.TransportError as e: - failed.append((descriptive_name, e)) - continue + states = misc.EmptyI - if failed or invalid or unsupported: - raise apx.ProblematicSearchServers(failed, - invalid, unsupported) - - def get_incorp_info(self): - """This function returns a mapping of package stems to the - version at which they are incorporated, if they are - incorporated, and the version at which they are installed, if - they are installed.""" - - # This maps fmris to the version at which they're incorporated. - inc_vers = {} - inst_stems = {} - - img_cat = self._img.get_catalog( - self._img.IMG_CATALOG_INSTALLED) - cat_info = frozenset([img_cat.DEPENDENCY]) - - # The incorporation list should include all installed, - # incorporated packages from all publishers. - for pfmri, actions in img_cat.actions(cat_info): - inst_stems[pfmri.pkg_name] = pfmri.version - for a in actions: - if a.name != "depend" or \ - a.attrs["type"] != "incorporate": - continue - # Record incorporated packages. - tgt = fmri.PkgFmri(a.attrs["fmri"]) - tver = tgt.version - # incorporates without a version should be - # ignored. - if not tver: - continue - over = inc_vers.get( - tgt.pkg_name, None) - - # In case this package has been - # incorporated more than once, - # use the newest version. - if over > tver: - continue - inc_vers[tgt.pkg_name] = tver - return inc_vers, inst_stems - - @staticmethod - def __unconvert_return_type(v): - return v == query_p.Query.RETURN_ACTIONS - - def _validate_search(self, query_str_lst): - """Called by remote search if server responds that the - request was invalid. 
In this case, parse the query on - the client-side and determine what went wrong.""" - - for q in query_str_lst: - l = query_p.QueryLexer() - l.build() - qp = query_p.QueryParser(l) - try: - query = qp.parse(q.text) - except query_p.BooleanQueryException as e: - return apx.BooleanQueryException(e) - except query_p.ParseError as e: - return apx.ParseError(e) - - return None - - def rebuild_search_index(self): - """Rebuilds the search indexes. Removes all - existing indexes and replaces them from scratch rather than - performing the incremental update which is usually used. - This is useful for times when the index for the client has - been corrupted.""" - self._img.update_index_dir() - self.log_operation_start("rebuild-index") - if not os.path.isdir(self._img.index_dir): - self._img.mkdirs() - try: - ind = indexer.Indexer(self._img, self._img.get_manifest, - self._img.get_manifest_path, - self.__progresstracker, self._img.list_excludes()) - ind.rebuild_index_from_scratch( - self._img.gen_installed_pkgs()) - except search_errors.ProblematicPermissionsIndexException as e: - error = apx.ProblematicPermissionsIndexException(e) - self.log_operation_end(error=error) - raise error + if PackageInfo.CATEGORIES not in info_needed: + cats = None + if PackageInfo.SUMMARY in info_needed: + if summary is None: + summary = "" else: - self.log_operation_end() - - def get_manifest(self, pfmri, all_variants=True, repos=None): - """Returns the Manifest object for the given package FMRI. - - 'all_variants' is an optional boolean value indicating whther - the manifest should include metadata for all variants and - facets. + summary = None + + pis.append( + PackageInfo( + pkg_stem=name, + summary=summary, + category_info_list=cat_info, + states=states, + publisher=pub, + version=release, + build_release=build_release, + branch=branch, + packaging_date=packaging_date, + size=size, + csize=csize, + pfmri=pfmri, + licenses=licenses, + links=links, + hardlinks=hardlinks, + files=files, + dirs=dirs, + dependencies=dependencies, + description=description, + attrs=attrs, + last_update=last_update, + last_install=last_install, + ) + ) + except apx.InventoryException as e: + if e.illegal: + self.log_operation_end(result=RESULT_FAILED_BAD_REQUEST) + rval[self.INFO_MISSING] = e.notfound + rval[self.INFO_ILLEGALS] = e.illegal + else: + if pis: + self.log_operation_end() + else: + self.log_operation_end(result=RESULT_NOTHING_TO_DO) + return rval + + def can_be_canceled(self): + """Returns true if the API is in a cancelable state.""" + return self.__can_be_canceled + + def _disable_cancel(self): + """Sets_can_be_canceled to False in a way that prevents missed + wakeups. This may raise CanceledException, if a + cancellation is pending.""" + + self.__cancel_lock.acquire() + if self.__canceling: + self.__cancel_lock.release() + self._img.transport.reset() + raise apx.CanceledException() + else: + self.__set_can_be_canceled(False) + self.__cancel_lock.release() + + def _enable_cancel(self): + """Sets can_be_canceled to True while grabbing the cancel + locks. The caller must still hold the activity lock while + calling this function.""" + + self.__cancel_lock.acquire() + self.__set_can_be_canceled(True) + self.__cancel_lock.release() + + def __set_can_be_canceled(self, status): + """Private method. Handles the details of changing the + cancelable state.""" + assert self.__cancel_lock._is_owned() + + # If caller requests a change to current state there is + # nothing to do. 
+ if self.__can_be_canceled == status: + return + + if status: + # Callers must hold activity lock for operations + # that they will make cancelable. + assert self._activity_lock._is_owned() + # In any situation where the caller holds the activity + # lock and wants to set cancelable to true, a cancel + # should not already be in progress. This is because + # it should not be possible to invoke cancel until + # this routine has finished. Assert that we're not + # canceling. + assert not self.__canceling + + self.__can_be_canceled = status + if self.__cancel_state_callable: + self.__cancel_state_callable(self.__can_be_canceled) + + def reset(self): + """Resets the API back the initial state. Note: + this does not necessarily return the disk to its initial state + since the indexes or download cache may have been changed by + the prepare method.""" + self._acquire_activity_lock() + self.__reset_unlock() + self._activity_lock.release() + + def __reset_unlock(self): + """Private method. Provides a way to reset without taking the + activity lock. Should only be called by a thread which already + holds the activity lock.""" + + assert self._activity_lock._is_owned() + + # This needs to be done first so that find_root can use it. + self.__progresstracker.reset() + + # Ensure alternate sources are always cleared in an + # exception scenario. + self.__set_img_alt_sources(None) + self.__alt_sources = {} + + self._img.cleanup_downloads() + # Cache transport statistics about problematic repo sources + repo_status = self._img.transport.repo_status + self._img.transport.shutdown() + + # Recreate the image object using the path the api + # object was created with instead of the current path. + self._img = image.Image( + self._img_path, + progtrack=self.__progresstracker, + user_provided_dir=True, + cmdpath=self.cmdpath, + ) + self._img.blocking_locks = self.__blocking_locks + + self._img.transport.repo_status = repo_status + + lin = None + if self._img.linked.ischild(): + lin = self._img.linked.child_name + self.__progresstracker.set_linked_name(lin) + + self.__plan_desc = None + self.__planned_children = False + self.__plan_type = None + self.__prepared = False + self.__executed = False + self.__be_name = None + + self._cancel_cleanup_exception() + + def __check_cancel(self): + """Private method. Provides a callback method for internal + code to use to determine whether the current action has been + canceled.""" + return self.__canceling + + def _cancel_cleanup_exception(self): + """A private method that is called from exception handlers. + This is not needed if the method calls reset unlock, + which will call this method too. This catches the case + where a caller might have called cancel and gone to sleep, + but the requested operation failed with an exception before + it could raise a CanceledException.""" + + self.__cancel_lock.acquire() + self.__set_can_be_canceled(False) + self.__canceling = False + # Wake up any threads that are waiting on this aborted + # operation. + self.__cancel_cv.notify_all() + self.__cancel_lock.release() + + def _cancel_done(self): + """A private method that wakes any threads that have been + sleeping, waiting for a cancellation to finish.""" + + self.__cancel_lock.acquire() + if self.__canceling: + self.__canceling = False + self.__cancel_cv.notify_all() + self.__cancel_lock.release() + + def cancel(self): + """Used for asynchronous cancelation. It returns the API + to the state it was in prior to the current method being + invoked. 
Canceling during a plan phase returns the API to + its initial state. Canceling during prepare puts the API + into the state it was in just after planning had completed. + Plan execution cannot be canceled. A call to this method blocks + until the cancellation has happened. Note: this does not + necessarily return the disk to its initial state since the + indexes or download cache may have been changed by the + prepare method.""" + + self.__cancel_lock.acquire() + + if not self.__can_be_canceled: + self.__cancel_lock.release() + return False + + self.__set_can_be_canceled(False) + self.__canceling = True + # Wait until the cancelled operation wakes us up. + self.__cancel_cv.wait() + self.__cancel_lock.release() + return True + + def clear_history(self): + """Discard history information about in-progress operations.""" + self._img.history.clear() + + def __set_history_PlanCreationException(self, e): + if ( + e.unmatched_fmris + or e.multiple_matches + or e.missing_matches + or e.illegal + ): + self.log_operation_end(error=e, result=RESULT_FAILED_BAD_REQUEST) + else: + self.log_operation_end(error=e) + + @_LockedGenerator() + def local_search(self, query_lst): + """local_search takes a list of Query objects and performs + each query against the installed packages of the image.""" + + l = query_p.QueryLexer() + l.build() + qp = query_p.QueryParser(l) + ssu = None + for i, q in enumerate(query_lst): + try: + query = qp.parse(q.text) + query_rr = qp.parse(q.text) + if query_rr.remove_root(self._img.root): + query.add_or(query_rr) + if q.return_type == query_p.Query.RETURN_PACKAGES: + query.propagate_pkg_return() + except query_p.BooleanQueryException as e: + raise apx.BooleanQueryException(e) + except query_p.ParseError as e: + raise apx.ParseError(e) + self._img.update_index_dir() + assert self._img.index_dir + try: + query.set_info( + num_to_return=q.num_to_return, + start_point=q.start_point, + index_dir=self._img.index_dir, + get_manifest_path=self._img.get_manifest_path, + gen_installed_pkg_names=self._img.gen_installed_pkg_names, + case_sensitive=q.case_sensitive, + ) + res = query.search( + self._img.gen_installed_pkgs, + self._img.get_manifest_path, + self._img.list_excludes(), + ) + except search_errors.InconsistentIndexException as e: + raise apx.InconsistentIndexException(e) + # i is being inserted to track which query the results + # are for. None is being inserted since there is no + # publisher being searched against. + try: + for r in res: + yield i, None, r + except apx.SlowSearchUsed as e: + ssu = e + if ssu: + raise ssu + + @staticmethod + def __parse_v_0(line, pub, v): + """This function parses the string returned by a version 0 + search server and puts it into the expected format of + (query_number, publisher, (version, return_type, (results))). 
+ + "query_number" in the return value is fixed at 0 since search + v0 servers cannot accept multiple queries in a single HTTP + request.""" + + line = line.strip() + fields = line.split(None, 3) + return (0, pub, (v, Query.RETURN_ACTIONS, (fields[:4]))) + + @staticmethod + def __parse_v_1(line, pub, v): + """This function parses the string returned by a version 1 + search server and puts it into the expected format of + (query_number, publisher, (version, return_type, (results))) + If it receives a line it can't parse, it raises a + ServerReturnError.""" + + fields = line.split(None, 2) + if len(fields) != 3: + raise apx.ServerReturnError(line) + try: + return_type = int(fields[1]) + query_num = int(fields[0]) + except ValueError: + raise apx.ServerReturnError(line) + if return_type == Query.RETURN_ACTIONS: + subfields = fields[2].split(None, 2) + pfmri = fmri.PkgFmri(subfields[0]) + return pfmri, ( + query_num, + pub, + (v, return_type, (pfmri, unquote(subfields[1]), subfields[2])), + ) + elif return_type == Query.RETURN_PACKAGES: + pfmri = fmri.PkgFmri(fields[2]) + return pfmri, (query_num, pub, (v, return_type, pfmri)) + else: + raise apx.ServerReturnError(line) + + @_LockedGenerator() + def remote_search( + self, query_str_and_args_lst, servers=None, prune_versions=True + ): + """This function takes a list of Query objects, and optionally + a list of servers to search against. It performs each query + against each server and yields the results in turn. If no + servers are provided, the search is conducted against all + active servers known by the image. + + The servers argument is a list of servers in two possible + forms: the old deprecated form of a publisher, in a + dictionary, or a Publisher object. + + A call to this function returns a generator that holds + API locks. Callers must either iterate through all of the + results, or call close() on the resulting object. Otherwise + it is possible to get deadlocks or NRLock reentrance + exceptions.""" + + failed = [] + invalid = [] + unsupported = [] + + if not servers: + servers = self._img.gen_publishers() + + new_qs = [] + l = query_p.QueryLexer() + l.build() + qp = query_p.QueryParser(l) + for q in query_str_and_args_lst: + try: + query = qp.parse(q.text) + query_rr = qp.parse(q.text) + if query_rr.remove_root(self._img.root): + query.add_or(query_rr) + if q.return_type == query_p.Query.RETURN_PACKAGES: + query.propagate_pkg_return() + new_qs.append( + query_p.Query( + str(query), + q.case_sensitive, + q.return_type, + q.num_to_return, + q.start_point, + ) + ) + except query_p.BooleanQueryException as e: + raise apx.BooleanQueryException(e) + except query_p.ParseError as e: + raise apx.ParseError(e) - 'repos' is a list of URI strings or RepositoryURI objects that - represent the locations of additional sources of package data to - use during the planned operation. - """ + query_str_and_args_lst = new_qs - alt_pub = None - if repos: - pkg_pub_map, ignored, known_cat, inst_cat = \ - self.__get_alt_pkg_data(repos) - alt_pub = pkg_pub_map.get(pfmri.publisher, {}).get( - pfmri.pkg_name, {}).get(str(pfmri.version), None) - return self._img.get_manifest(pfmri, - ignore_excludes=all_variants, alt_pub=alt_pub) - - @staticmethod - def validate_response(res, v): - """This function is used to determine whether the first - line returned from a server is expected. 
This ensures that - search is really communicating with a search-enabled server.""" + incorp_info, inst_stems = self.get_incorp_info() + slist = [] + for entry in servers: + if isinstance(entry, dict): + origin = entry["origin"] try: - s = next(res) - return s == Query.VALIDATION_STRING[v] - except StopIteration: - return False - - def add_publisher(self, pub, refresh_allowed=True, - approved_cas=misc.EmptyI, revoked_cas=misc.EmptyI, - search_after=None, search_before=None, search_first=None, - unset_cas=misc.EmptyI): - """Add the provided publisher object to the image - configuration.""" - try: - self._img.add_publisher(pub, - refresh_allowed=refresh_allowed, - progtrack=self.__progresstracker, - approved_cas=approved_cas, revoked_cas=revoked_cas, - search_after=search_after, - search_before=search_before, - search_first=search_first, - unset_cas=unset_cas) - finally: - self._img.cleanup_downloads() - - def get_highest_ranked_publisher(self): - """Returns the highest ranked publisher object for the image.""" - return self._img.get_highest_ranked_publisher() - - def get_publisher(self, prefix=None, alias=None, duplicate=False): - """Retrieves a publisher object matching the provided prefix - (name) or alias. - - 'duplicate' is an optional boolean value indicating whether - a copy of the publisher object should be returned instead - of the original. - """ - pub = self._img.get_publisher(prefix=prefix, alias=alias) - if duplicate: - # Never return the original so that changes to the - # retrieved object are not reflected until - # update_publisher is called. - return copy.copy(pub) - return pub - - @_LockedCancelable() - def get_publisherdata(self, pub=None, repo=None): - """Attempts to retrieve publisher configuration information from - the specified publisher's repository or the provided repository. - If successful, it will either return an empty list (in the case - that the repository supports the operation, but doesn't offer - configuration information) or a list of Publisher objects. - If this operation is not supported by the publisher or the - specified repository, an UnsupportedRepositoryOperation - exception will be raised. - - 'pub' is an optional Publisher object. - - 'repo' is an optional RepositoryURI object. - - Either 'pub' or 'repo' must be provided.""" - - assert (pub or repo) and not (pub and repo) - - # Transport accepts either type of object, but a distinction is - # made in the client API for clarity. - pub = max(pub, repo) - - return self._img.transport.get_publisherdata(pub, - ccancel=self.__check_cancel) - - def get_publishers(self, duplicate=False): - """Returns a list of the publisher objects for the current - image. - - 'duplicate' is an optional boolean value indicating whether - copies of the publisher objects should be returned instead - of the originals. - """ - - res = self._img.get_sorted_publishers() - if duplicate: - return [copy.copy(p) for p in res] - return res + pub = self._img.get_publisher(origin=origin) + pub_uri = publisher.RepositoryURI(origin) + repo = publisher.Repository(origins=[pub_uri]) + except apx.UnknownPublisher: + pub = publisher.RepositoryURI(origin) + repo = publisher.Repository(origins=[pub]) + slist.append((pub, repo, origin)) + continue + + # Must be a publisher object. 
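# --- Editor's note (illustrative sketch, not part of this diff) ---
# remote_search() above returns a generator that holds API locks, so callers
# must either drain it or call close() on it.  A hedged usage sketch; "api"
# and the Query construction are assumptions, with the positional argument
# order (text, case_sensitive, return_type, num_to_return, start_point)
# taken from the query_p.Query(...) call in the reformatted code above.
q = query_p.Query("example-term", False, query_p.Query.RETURN_PACKAGES, 10, 0)
results = api.remote_search([q])
try:
    for query_num, pub, (version, return_type, payload) in results:
        # For RETURN_PACKAGES results, payload is a PkgFmri (see __parse_v_1).
        print(query_num, pub, payload)
finally:
    # Always release the generator so its API locks are dropped.
    results.close()
# --- End editor's note ---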
+ osets = entry.get_origin_sets() + if not osets: + continue + for repo in osets: + slist.append((entry, repo, entry.prefix)) + + for pub, alt_repo, descriptive_name in slist: + if self.__canceling: + raise apx.CanceledException() + + try: + res = self._img.transport.do_search( + pub, + query_str_and_args_lst, + ccancel=self.__check_cancel, + alt_repo=alt_repo, + ) + except apx.CanceledException: + raise + except apx.NegativeSearchResult: + continue + except (apx.InvalidDepotResponseException, apx.TransportError) as e: + # Alternate source failed portal test or can't + # be contacted at all. + failed.append((descriptive_name, e)) + continue + except apx.UnsupportedSearchError as e: + unsupported.append((descriptive_name, e)) + continue + except apx.MalformedSearchRequest as e: + ex = self._validate_search(query_str_and_args_lst) + if ex: + raise ex + failed.append((descriptive_name, e)) + continue + + try: + if not self.validate_response(res, 1): + invalid.append(descriptive_name) + continue + for line in res: + pfmri, ret = self.__parse_v_1(line, pub, 1) + pstem = pfmri.pkg_name + pver = pfmri.version + # Skip this package if a newer version + # is already installed and version + # pruning is enabled. + if ( + prune_versions + and pstem in inst_stems + and pver < inst_stems[pstem] + ): + continue + # Return this result if version pruning + # is disabled, the package is not + # incorporated, or the version of the + # package matches the incorporation. + if ( + not prune_versions + or pstem not in incorp_info + or pfmri.version.is_successor( + incorp_info[pstem], pkg.version.CONSTRAINT_AUTO + ) + ): + yield ret - def get_publisher_last_update_time(self, prefix=None, alias=None): - """Returns a datetime object representing the last time the - catalog for a publisher was modified or None.""" + except apx.CanceledException: + raise + except apx.TransportError as e: + failed.append((descriptive_name, e)) + continue + + if failed or invalid or unsupported: + raise apx.ProblematicSearchServers(failed, invalid, unsupported) + + def get_incorp_info(self): + """This function returns a mapping of package stems to the + version at which they are incorporated, if they are + incorporated, and the version at which they are installed, if + they are installed.""" + + # This maps fmris to the version at which they're incorporated. + inc_vers = {} + inst_stems = {} + + img_cat = self._img.get_catalog(self._img.IMG_CATALOG_INSTALLED) + cat_info = frozenset([img_cat.DEPENDENCY]) + + # The incorporation list should include all installed, + # incorporated packages from all publishers. + for pfmri, actions in img_cat.actions(cat_info): + inst_stems[pfmri.pkg_name] = pfmri.version + for a in actions: + if a.name != "depend" or a.attrs["type"] != "incorporate": + continue + # Record incorporated packages. + tgt = fmri.PkgFmri(a.attrs["fmri"]) + tver = tgt.version + # incorporates without a version should be + # ignored. + if not tver: + continue + over = inc_vers.get(tgt.pkg_name, None) + + # In case this package has been + # incorporated more than once, + # use the newest version. + if over > tver: + continue + inc_vers[tgt.pkg_name] = tver + return inc_vers, inst_stems + + @staticmethod + def __unconvert_return_type(v): + return v == query_p.Query.RETURN_ACTIONS + + def _validate_search(self, query_str_lst): + """Called by remote search if server responds that the + request was invalid. 
In this case, parse the query on + the client-side and determine what went wrong.""" + + for q in query_str_lst: + l = query_p.QueryLexer() + l.build() + qp = query_p.QueryParser(l) + try: + query = qp.parse(q.text) + except query_p.BooleanQueryException as e: + return apx.BooleanQueryException(e) + except query_p.ParseError as e: + return apx.ParseError(e) + + return None + + def rebuild_search_index(self): + """Rebuilds the search indexes. Removes all + existing indexes and replaces them from scratch rather than + performing the incremental update which is usually used. + This is useful for times when the index for the client has + been corrupted.""" + self._img.update_index_dir() + self.log_operation_start("rebuild-index") + if not os.path.isdir(self._img.index_dir): + self._img.mkdirs() + try: + ind = indexer.Indexer( + self._img, + self._img.get_manifest, + self._img.get_manifest_path, + self.__progresstracker, + self._img.list_excludes(), + ) + ind.rebuild_index_from_scratch(self._img.gen_installed_pkgs()) + except search_errors.ProblematicPermissionsIndexException as e: + error = apx.ProblematicPermissionsIndexException(e) + self.log_operation_end(error=error) + raise error + else: + self.log_operation_end() + + def get_manifest(self, pfmri, all_variants=True, repos=None): + """Returns the Manifest object for the given package FMRI. + + 'all_variants' is an optional boolean value indicating whther + the manifest should include metadata for all variants and + facets. + + 'repos' is a list of URI strings or RepositoryURI objects that + represent the locations of additional sources of package data to + use during the planned operation. + """ - if alias: - pub = self.get_publisher(alias=alias) - else: - pub = self.get_publisher(prefix=prefix) + alt_pub = None + if repos: + pkg_pub_map, ignored, known_cat, inst_cat = self.__get_alt_pkg_data( + repos + ) + alt_pub = ( + pkg_pub_map.get(pfmri.publisher, {}) + .get(pfmri.pkg_name, {}) + .get(str(pfmri.version), None) + ) + return self._img.get_manifest( + pfmri, ignore_excludes=all_variants, alt_pub=alt_pub + ) + + @staticmethod + def validate_response(res, v): + """This function is used to determine whether the first + line returned from a server is expected. This ensures that + search is really communicating with a search-enabled server.""" - if pub.disabled: - return None + try: + s = next(res) + return s == Query.VALIDATION_STRING[v] + except StopIteration: + return False + + def add_publisher( + self, + pub, + refresh_allowed=True, + approved_cas=misc.EmptyI, + revoked_cas=misc.EmptyI, + search_after=None, + search_before=None, + search_first=None, + unset_cas=misc.EmptyI, + ): + """Add the provided publisher object to the image + configuration.""" + try: + self._img.add_publisher( + pub, + refresh_allowed=refresh_allowed, + progtrack=self.__progresstracker, + approved_cas=approved_cas, + revoked_cas=revoked_cas, + search_after=search_after, + search_before=search_before, + search_first=search_first, + unset_cas=unset_cas, + ) + finally: + self._img.cleanup_downloads() + + def get_highest_ranked_publisher(self): + """Returns the highest ranked publisher object for the image.""" + return self._img.get_highest_ranked_publisher() + + def get_publisher(self, prefix=None, alias=None, duplicate=False): + """Retrieves a publisher object matching the provided prefix + (name) or alias. + + 'duplicate' is an optional boolean value indicating whether + a copy of the publisher object should be returned instead + of the original. 
+ """ + pub = self._img.get_publisher(prefix=prefix, alias=alias) + if duplicate: + # Never return the original so that changes to the + # retrieved object are not reflected until + # update_publisher is called. + return copy.copy(pub) + return pub + + @_LockedCancelable() + def get_publisherdata(self, pub=None, repo=None): + """Attempts to retrieve publisher configuration information from + the specified publisher's repository or the provided repository. + If successful, it will either return an empty list (in the case + that the repository supports the operation, but doesn't offer + configuration information) or a list of Publisher objects. + If this operation is not supported by the publisher or the + specified repository, an UnsupportedRepositoryOperation + exception will be raised. + + 'pub' is an optional Publisher object. + + 'repo' is an optional RepositoryURI object. + + Either 'pub' or 'repo' must be provided.""" + + assert (pub or repo) and not (pub and repo) + + # Transport accepts either type of object, but a distinction is + # made in the client API for clarity. + pub = max(pub, repo) + + return self._img.transport.get_publisherdata( + pub, ccancel=self.__check_cancel + ) + + def get_publishers(self, duplicate=False): + """Returns a list of the publisher objects for the current + image. + + 'duplicate' is an optional boolean value indicating whether + copies of the publisher objects should be returned instead + of the originals. + """ - dt = None - self._acquire_activity_lock() - try: - self._enable_cancel() - try: - dt = pub.catalog.last_modified - except: - self.__reset_unlock() - raise - try: - self._disable_cancel() - except apx.CanceledException: - self._cancel_done() - raise - finally: - self._activity_lock.release() - return dt - - def has_publisher(self, prefix=None, alias=None): - """Returns a boolean value indicating whether a publisher using - the given prefix or alias exists.""" - return self._img.has_publisher(prefix=prefix, alias=alias) - - def remove_publisher(self, prefix=None, alias=None): - """Removes a publisher object matching the provided prefix - (name) or alias.""" - self._img.remove_publisher(prefix=prefix, alias=alias, - progtrack=self.__progresstracker) - - self.__remove_unused_client_certificates() - - def update_publisher(self, pub, refresh_allowed=True, search_after=None, - search_before=None, search_first=None): - """Replaces an existing publisher object with the provided one - using the _source_object_id identifier set during copy. - - 'refresh_allowed' is an optional boolean value indicating - whether a refresh of publisher metadata (such as its catalog) - should be performed if transport information is changed for a - repository, mirror, or origin. 
If False, no attempt will be - made to retrieve publisher metadata.""" - - self._acquire_activity_lock() - try: - self._disable_cancel() - with self._img.locked_op("update-publisher"): - return self.__update_publisher(pub, - refresh_allowed=refresh_allowed, - search_after=search_after, - search_before=search_before, - search_first=search_first) - except apx.CanceledException as e: - self._cancel_done() - raise - finally: - self._img.cleanup_downloads() - self._activity_lock.release() + res = self._img.get_sorted_publishers() + if duplicate: + return [copy.copy(p) for p in res] + return res - def __update_publisher(self, pub, refresh_allowed=True, - search_after=None, search_before=None, search_first=None): - """Private publisher update method; caller responsible for - locking.""" + def get_publisher_last_update_time(self, prefix=None, alias=None): + """Returns a datetime object representing the last time the + catalog for a publisher was modified or None.""" - assert (not search_after and not search_before) or \ - (not search_after and not search_first) or \ - (not search_before and not search_first) + if alias: + pub = self.get_publisher(alias=alias) + else: + pub = self.get_publisher(prefix=prefix) - # Before continuing, validate SSL information. - try: - self._img.check_cert_validity(pubs=[pub]) - except apx.ExpiringCertificate as e: - logger.warning(str(e)) - - def origins_changed(oldr, newr): - old_origins = set([ - (o.uri, o.ssl_cert, - o.ssl_key, tuple(sorted(o.proxies)), o.disabled) - for o in oldr.origins - ]) - new_origins = set([ - (o.uri, o.ssl_cert, - o.ssl_key, tuple(sorted(o.proxies)), o.disabled) - for o in newr.origins - ]) - return (new_origins - old_origins), \ - new_origins.symmetric_difference(old_origins) - - def need_refresh(oldo, newo): - if newo.disabled: - # The publisher is disabled, so no refresh - # should be performed. - return False - - if oldo.disabled and not newo.disabled: - # The publisher has been re-enabled, so - # retrieve the catalog. - return True - - oldr = oldo.repository - newr = newo.repository - if newr._source_object_id != id(oldr): - # Selected repository has changed. - return True - # If any were added or removed, refresh. - return len(origins_changed(oldr, newr)[1]) != 0 - - refresh_catalog = False - disable = False - orig_pub = None - - # Configuration must be manipulated directly. - publishers = self._img.cfg.publishers - - # First, attempt to match the updated publisher object to an - # existing one using the object id that was stored during - # copy(). - for key, old in six.iteritems(publishers): - if pub._source_object_id == id(old): - # Store the new publisher's id and the old - # publisher object so it can be restored if the - # update operation fails. - orig_pub = (id(pub), old) - break - - if not orig_pub: - # If a matching publisher couldn't be found and - # replaced, something is wrong (client api usage - # error). - raise apx.UnknownPublisher(pub) - - # Next, be certain that the publisher's prefix and alias - # are not already in use by another publisher. - for key, old in six.iteritems(publishers): - if pub._source_object_id == id(old): - # Don't check the object we're replacing. 
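# --- Editor's note (illustrative sketch, not part of this diff) ---
# update_publisher() above expects a *copy* of an existing publisher, obtained
# via get_publisher(..., duplicate=True), so that its _source_object_id can be
# matched against the original.  A hedged sketch of that pattern; "api" and
# the prefix string are placeholders, and the origin-editing step is only
# indicated, since the Repository/Publisher mutation API is not shown here.
pub = api.get_publisher(prefix="example.org", duplicate=True)
# ... adjust pub.repository origins, SSL settings, etc. here ...
api.update_publisher(pub, refresh_allowed=True)
# --- End editor's note ---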
- continue + if pub.disabled: + return None - if pub.prefix == old.prefix or \ - pub.prefix == old.alias or \ - pub.alias and (pub.alias == old.alias or - pub.alias == old.prefix): - raise apx.DuplicatePublisher(pub) + dt = None + self._acquire_activity_lock() + try: + self._enable_cancel() + try: + dt = pub.catalog.last_modified + except: + self.__reset_unlock() + raise + try: + self._disable_cancel() + except apx.CanceledException: + self._cancel_done() + raise + finally: + self._activity_lock.release() + return dt + + def has_publisher(self, prefix=None, alias=None): + """Returns a boolean value indicating whether a publisher using + the given prefix or alias exists.""" + return self._img.has_publisher(prefix=prefix, alias=alias) + + def remove_publisher(self, prefix=None, alias=None): + """Removes a publisher object matching the provided prefix + (name) or alias.""" + self._img.remove_publisher( + prefix=prefix, alias=alias, progtrack=self.__progresstracker + ) + + self.__remove_unused_client_certificates() + + def update_publisher( + self, + pub, + refresh_allowed=True, + search_after=None, + search_before=None, + search_first=None, + ): + """Replaces an existing publisher object with the provided one + using the _source_object_id identifier set during copy. + + 'refresh_allowed' is an optional boolean value indicating + whether a refresh of publisher metadata (such as its catalog) + should be performed if transport information is changed for a + repository, mirror, or origin. If False, no attempt will be + made to retrieve publisher metadata.""" + + self._acquire_activity_lock() + try: + self._disable_cancel() + with self._img.locked_op("update-publisher"): + return self.__update_publisher( + pub, + refresh_allowed=refresh_allowed, + search_after=search_after, + search_before=search_before, + search_first=search_first, + ) + except apx.CanceledException as e: + self._cancel_done() + raise + finally: + self._img.cleanup_downloads() + self._activity_lock.release() + + def __update_publisher( + self, + pub, + refresh_allowed=True, + search_after=None, + search_before=None, + search_first=None, + ): + """Private publisher update method; caller responsible for + locking.""" + + assert ( + (not search_after and not search_before) + or (not search_after and not search_first) + or (not search_before and not search_first) + ) + + # Before continuing, validate SSL information. + try: + self._img.check_cert_validity(pubs=[pub]) + except apx.ExpiringCertificate as e: + logger.warning(str(e)) - # Next, determine what needs updating and add the updated - # publisher. - for key, old in six.iteritems(publishers): - if pub._source_object_id == id(old): - old = orig_pub[-1] - if need_refresh(old, pub): - refresh_catalog = True - if not old.disabled and pub.disabled: - disable = True - - # Now remove the old publisher object using the - # iterator key if the prefix has changed. - if key != pub.prefix: - del publishers[key] - - # Prepare the new publisher object. - pub.meta_root = \ - self._img._get_publisher_meta_root( - pub.prefix) - pub.transport = self._img.transport - - # Finally, add the new publisher object. 
- publishers[pub.prefix] = pub - break - - def cleanup(): - # Attempting to unpack a non-sequence%s; - # pylint: disable=W0633 - new_id, old_pub = orig_pub - for new_pfx, new_pub in six.iteritems(publishers): - if id(new_pub) == new_id: - publishers[old_pub.prefix] = old_pub - break + def origins_changed(oldr, newr): + old_origins = set( + [ + ( + o.uri, + o.ssl_cert, + o.ssl_key, + tuple(sorted(o.proxies)), + o.disabled, + ) + for o in oldr.origins + ] + ) + new_origins = set( + [ + ( + o.uri, + o.ssl_cert, + o.ssl_key, + tuple(sorted(o.proxies)), + o.disabled, + ) + for o in newr.origins + ] + ) + return ( + new_origins - old_origins + ), new_origins.symmetric_difference(old_origins) + + def need_refresh(oldo, newo): + if newo.disabled: + # The publisher is disabled, so no refresh + # should be performed. + return False - repo = pub.repository + if oldo.disabled and not newo.disabled: + # The publisher has been re-enabled, so + # retrieve the catalog. + return True - validate = origins_changed(orig_pub[-1].repository, - pub.repository)[0] + oldr = oldo.repository + newr = newo.repository + if newr._source_object_id != id(oldr): + # Selected repository has changed. + return True + # If any were added or removed, refresh. + return len(origins_changed(oldr, newr)[1]) != 0 + + refresh_catalog = False + disable = False + orig_pub = None + + # Configuration must be manipulated directly. + publishers = self._img.cfg.publishers + + # First, attempt to match the updated publisher object to an + # existing one using the object id that was stored during + # copy(). + for key, old in six.iteritems(publishers): + if pub._source_object_id == id(old): + # Store the new publisher's id and the old + # publisher object so it can be restored if the + # update operation fails. + orig_pub = (id(pub), old) + break + + if not orig_pub: + # If a matching publisher couldn't be found and + # replaced, something is wrong (client api usage + # error). + raise apx.UnknownPublisher(pub) + + # Next, be certain that the publisher's prefix and alias + # are not already in use by another publisher. + for key, old in six.iteritems(publishers): + if pub._source_object_id == id(old): + # Don't check the object we're replacing. + continue + + if ( + pub.prefix == old.prefix + or pub.prefix == old.alias + or pub.alias + and (pub.alias == old.alias or pub.alias == old.prefix) + ): + raise apx.DuplicatePublisher(pub) + + # Next, determine what needs updating and add the updated + # publisher. + for key, old in six.iteritems(publishers): + if pub._source_object_id == id(old): + old = orig_pub[-1] + if need_refresh(old, pub): + refresh_catalog = True + if not old.disabled and pub.disabled: + disable = True + + # Now remove the old publisher object using the + # iterator key if the prefix has changed. + if key != pub.prefix: + del publishers[key] + + # Prepare the new publisher object. + pub.meta_root = self._img._get_publisher_meta_root(pub.prefix) + pub.transport = self._img.transport + + # Finally, add the new publisher object. 
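The origins_changed() helper above reduces each origin to a hashable tuple so that additions, removals, and modifications fall out of plain set arithmetic. A standalone illustration with made-up values:

```python
# Each tuple mirrors (uri, ssl_cert, ssl_key, sorted proxies, disabled).
old = {("https://a.example.org/", None, None, (), False)}
new = {
    ("https://a.example.org/", None, None, (), False),
    ("https://b.example.org/", None, None, (), False),
}

added = new - old                        # origins present only in the update
changed = new.symmetric_difference(old)  # anything added or removed
print(len(added), len(changed))          # 1 1 -> a catalog refresh is needed
```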
+ publishers[pub.prefix] = pub + break + + def cleanup(): + # Attempting to unpack a non-sequence%s; + # pylint: disable=W0633 + new_id, old_pub = orig_pub + for new_pfx, new_pub in six.iteritems(publishers): + if id(new_pub) == new_id: + publishers[old_pub.prefix] = old_pub + break + + repo = pub.repository + + validate = origins_changed(orig_pub[-1].repository, pub.repository)[0] + try: + if disable or ( + not repo.origins and orig_pub[-1].repository.origins + ): + # Remove the publisher's metadata (such as + # catalogs, etc.). This only needs to be done + # in the event that a publisher is disabled or + # has transitioned from having origins to not + # having any at all; in any other case (the + # origins changing, etc.), refresh() will do the + # right thing. + self._img.remove_publisher_metadata(pub) + elif not pub.disabled and not refresh_catalog: + refresh_catalog = pub.needs_refresh + + if refresh_catalog and refresh_allowed: + # One of the publisher's repository origins may + # have changed, so the publisher needs to be + # revalidated. + + if validate: + self._img.transport.valid_publisher_test(pub) + + # Validate all new origins against publisher + # configuration. + for uri, ssl_cert, ssl_key, proxies, disabled in validate: + repo = publisher.RepositoryURI( + uri, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + proxies=proxies, + disabled=disabled, + ) + pub.validate_config(repo) + + self.__refresh( + pubs=[pub], immediate=True, ignore_unreachable=False + ) + elif refresh_catalog: + # Something has changed (such as a repository + # origin) for the publisher, so a refresh should + # occur, but isn't currently allowed. As such, + # clear the last_refreshed time so that the next + # time the client checks to see if a refresh is + # needed and is allowed, one will be performed. + pub.last_refreshed = None + except Exception as e: + # If any of the above fails, the original publisher + # information needs to be restored so that state is + # consistent. + cleanup() + raise + except: + # If any of the above fails, the original publisher + # information needs to be restored so that state is + # consistent. + cleanup() + raise + + if search_first: + self._img.set_highest_ranked_publisher(prefix=pub.prefix) + elif search_before: + self._img.pub_search_before(pub.prefix, search_before) + elif search_after: + self._img.pub_search_after(pub.prefix, search_after) + + # Successful; so save configuration. + self._img.save_config() + + self.__remove_unused_client_certificates() + + def __remove_unused_client_certificates(self): + """Remove unused client certificate files""" + + # Get certificate files currently in use. + ssl_path = os.path.join(self._img.imgdir, "ssl") + current_file_list = set() + pubs = self._img.get_publishers() + for p in pubs: + pub = pubs[p] + for origin in pub.repository.origins: + current_file_list.add(origin.ssl_key) + current_file_list.add(origin.ssl_cert) + + # Remove files found in ssl directory that + # are not in use by publishers. + for f in os.listdir(ssl_path): + path = os.path.join(ssl_path, f) + if path not in current_file_list: try: - if disable or (not repo.origins and - orig_pub[-1].repository.origins): - # Remove the publisher's metadata (such as - # catalogs, etc.). This only needs to be done - # in the event that a publisher is disabled or - # has transitioned from having origins to not - # having any at all; in any other case (the - # origins changing, etc.), refresh() will do the - # right thing. 
- self._img.remove_publisher_metadata(pub) - elif not pub.disabled and not refresh_catalog: - refresh_catalog = pub.needs_refresh - - if refresh_catalog and refresh_allowed: - # One of the publisher's repository origins may - # have changed, so the publisher needs to be - # revalidated. - - if validate: - self._img.transport.valid_publisher_test( - pub) - - # Validate all new origins against publisher - # configuration. - for uri, ssl_cert, ssl_key, proxies, disabled \ - in validate: - repo = publisher.RepositoryURI(uri, - ssl_cert=ssl_cert, ssl_key=ssl_key, - proxies=proxies, disabled=disabled) - pub.validate_config(repo) - - self.__refresh(pubs=[pub], immediate=True, - ignore_unreachable=False) - elif refresh_catalog: - # Something has changed (such as a repository - # origin) for the publisher, so a refresh should - # occur, but isn't currently allowed. As such, - # clear the last_refreshed time so that the next - # time the client checks to see if a refresh is - # needed and is allowed, one will be performed. - pub.last_refreshed = None - except Exception as e: - # If any of the above fails, the original publisher - # information needs to be restored so that state is - # consistent. - cleanup() - raise + portable.remove(path) except: - # If any of the above fails, the original publisher - # information needs to be restored so that state is - # consistent. - cleanup() - raise - - if search_first: - self._img.set_highest_ranked_publisher( - prefix=pub.prefix) - elif search_before: - self._img.pub_search_before(pub.prefix, search_before) - elif search_after: - self._img.pub_search_after(pub.prefix, search_after) - - # Successful; so save configuration. - self._img.save_config() - - self.__remove_unused_client_certificates() - - def __remove_unused_client_certificates(self): - """Remove unused client certificate files""" + continue + + def log_operation_end(self, error=None, result=None, release_notes=None): + """Marks the end of an operation to be recorded in image + history. + + 'result' should be a pkg.client.history constant value + representing the outcome of an operation. If not provided, + and 'error' is provided, the final result of the operation will + be based on the class of 'error' and 'error' will be recorded + for the current operation. If 'result' and 'error' is not + provided, success is assumed.""" + self._img.history.log_operation_end( + error=error, result=result, release_notes=release_notes + ) + + def log_operation_error(self, error): + """Adds an error to the list of errors to be recorded in image + history for the current opreation.""" + self._img.history.log_operation_error(error) + + def log_operation_start(self, name): + """Marks the start of an operation to be recorded in image + history.""" + # If an operation is going to be discarded, then don't take the + # performance hit of actually getting the BE info. + if name in history.DISCARDED_OPERATIONS: + be_name, be_uuid = None, None + else: + be_name, be_uuid = bootenv.BootEnv.get_be_name(self._img.root) + self._img.history.log_operation_start( + name, be_name=be_name, be_uuid=be_uuid + ) + + def parse_liname(self, name, unknown_ok=False): + """Parse a linked image name string and return a + LinkedImageName object. If "unknown_ok" is true then + liname must correspond to an existing linked image. 
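The history-logging wrappers reformatted above (log_operation_start/error/end) are typically used to bracket a client operation. A minimal sketch, assuming the same ImageInterface instance `api`; the operation name and the do_refresh() call are stand-ins, not real API:

```python
op_name = "refresh-publishers"  # hypothetical history operation name

api.log_operation_start(op_name)
try:
    do_refresh()             # stand-in for the actual work being recorded
    api.log_operation_end()  # no 'error' or 'result' -> recorded as success
except Exception as e:
    api.log_operation_error(e)
    api.log_operation_end(error=e)
    raise
```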
If + "unknown_ok" is false and liname doesn't correspond to + an existing linked image then liname must be a + syntactically valid and fully qualified linked image + name.""" + + return self._img.linked.parse_name(name, unknown_ok=unknown_ok) + + def parse_p5i(self, data=None, fileobj=None, location=None): + """Reads the pkg(7) publisher JSON formatted data at 'location' + or from the provided file-like object 'fileobj' and returns a + list of tuples of the format (publisher object, pkg_names). + pkg_names is a list of strings representing package names or + FMRIs. If any pkg_names not specific to a publisher were + provided, the last tuple returned will be of the format (None, + pkg_names). + + 'data' is an optional string containing the p5i data. + + 'fileobj' is an optional file-like object that must support a + 'read' method for retrieving data. + + 'location' is an optional string value that should either start + with a leading slash and be pathname of a file or a URI string. + If it is a URI string, supported protocol schemes are 'file', + 'ftp', 'http', and 'https'. + + 'data' or 'fileobj' or 'location' must be provided.""" + + return p5i.parse(data=data, fileobj=fileobj, location=location) + + def parse_fmri_patterns(self, patterns): + """A generator function that yields a list of tuples of the form + (pattern, error, fmri, matcher) based on the provided patterns, + where 'error' is any exception encountered while parsing the + pattern, 'fmri' is the resulting FMRI object, and 'matcher' is + one of the following constant values: + + MATCH_EXACT + Indicates that the name portion of the pattern + must match exactly and the version (if provided) + must be considered a successor or equal to the + target FMRI. + + MATCH_FMRI + Indicates that the name portion of the pattern + must be a proper subset and the version (if + provided) must be considered a successor or + equal to the target FMRI. + + MATCH_GLOB + Indicates that the name portion of the pattern + uses fnmatch rules for pattern matching (shell- + style wildcards) and that the version can either + match exactly, match partially, or contain + wildcards. + """ - # Get certificate files currently in use. - ssl_path = os.path.join(self._img.imgdir, "ssl") - current_file_list = set() - pubs = self._img.get_publishers() - for p in pubs: - pub = pubs[p] - for origin in pub.repository.origins: - current_file_list.add(origin.ssl_key) - current_file_list.add(origin.ssl_cert) - - # Remove files found in ssl directory that - # are not in use by publishers. - for f in os.listdir(ssl_path): - path = os.path.join(ssl_path, f) - if path not in current_file_list: - try: - portable.remove(path) - except: - continue - - def log_operation_end(self, error=None, result=None, - release_notes=None): - """Marks the end of an operation to be recorded in image - history. - - 'result' should be a pkg.client.history constant value - representing the outcome of an operation. If not provided, - and 'error' is provided, the final result of the operation will - be based on the class of 'error' and 'error' will be recorded - for the current operation. 
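parse_p5i(), reformatted above, accepts exactly one of 'data', 'fileobj', or 'location'. A short consumption sketch with a hypothetical path, again assuming an ImageInterface instance `api`:

```python
# Each entry pairs a Publisher object (or None) with a list of package names.
for pub, pkg_names in api.parse_p5i(location="/tmp/example.p5i"):
    if pub is None:
        print("packages not tied to a publisher:", pkg_names)
    else:
        print(pub.prefix, pkg_names)
```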
If 'result' and 'error' is not - provided, success is assumed.""" - self._img.history.log_operation_end(error=error, result=result, - release_notes=release_notes) - - def log_operation_error(self, error): - """Adds an error to the list of errors to be recorded in image - history for the current opreation.""" - self._img.history.log_operation_error(error) - - def log_operation_start(self, name): - """Marks the start of an operation to be recorded in image - history.""" - # If an operation is going to be discarded, then don't take the - # performance hit of actually getting the BE info. - if name in history.DISCARDED_OPERATIONS: - be_name, be_uuid = None, None + for pat in patterns: + error = None + matcher = None + npat = None + try: + parts = pat.split("@", 1) + pat_stem = parts[0] + pat_ver = None + if len(parts) > 1: + pat_ver = parts[1] + + if "*" in pat_stem or "?" in pat_stem: + matcher = self.MATCH_GLOB + elif pat_stem.startswith("pkg:/") or pat_stem.startswith("/"): + matcher = self.MATCH_EXACT else: - be_name, be_uuid = bootenv.BootEnv.get_be_name( - self._img.root) - self._img.history.log_operation_start(name, - be_name=be_name, be_uuid=be_uuid) - - def parse_liname(self, name, unknown_ok=False): - """Parse a linked image name string and return a - LinkedImageName object. If "unknown_ok" is true then - liname must correspond to an existing linked image. If - "unknown_ok" is false and liname doesn't correspond to - an existing linked image then liname must be a - syntactically valid and fully qualified linked image - name.""" - - return self._img.linked.parse_name(name, unknown_ok=unknown_ok) - - def parse_p5i(self, data=None, fileobj=None, location=None): - """Reads the pkg(7) publisher JSON formatted data at 'location' - or from the provided file-like object 'fileobj' and returns a - list of tuples of the format (publisher object, pkg_names). - pkg_names is a list of strings representing package names or - FMRIs. If any pkg_names not specific to a publisher were - provided, the last tuple returned will be of the format (None, - pkg_names). - - 'data' is an optional string containing the p5i data. - - 'fileobj' is an optional file-like object that must support a - 'read' method for retrieving data. - - 'location' is an optional string value that should either start - with a leading slash and be pathname of a file or a URI string. - If it is a URI string, supported protocol schemes are 'file', - 'ftp', 'http', and 'https'. - - 'data' or 'fileobj' or 'location' must be provided.""" - - return p5i.parse(data=data, fileobj=fileobj, location=location) - - def parse_fmri_patterns(self, patterns): - """A generator function that yields a list of tuples of the form - (pattern, error, fmri, matcher) based on the provided patterns, - where 'error' is any exception encountered while parsing the - pattern, 'fmri' is the resulting FMRI object, and 'matcher' is - one of the following constant values: - - MATCH_EXACT - Indicates that the name portion of the pattern - must match exactly and the version (if provided) - must be considered a successor or equal to the - target FMRI. - - MATCH_FMRI - Indicates that the name portion of the pattern - must be a proper subset and the version (if - provided) must be considered a successor or - equal to the target FMRI. - - MATCH_GLOB - Indicates that the name portion of the pattern - uses fnmatch rules for pattern matching (shell- - style wildcards) and that the version can either - match exactly, match partially, or contain - wildcards. 
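The matcher constants documented above determine how a caller should interpret each yielded tuple. A sketch with hypothetical patterns, assuming the same `api` instance:

```python
patterns = ["web/server/*", "pkg:/package/pkg@0.5.11"]  # hypothetical patterns

for pat, err, pfmri, matcher in api.parse_fmri_patterns(patterns):
    if err:
        print("invalid pattern {0}: {1}".format(pat, err))
    elif matcher == api.MATCH_GLOB:
        print("glob pattern:", pfmri)
    elif matcher == api.MATCH_EXACT:
        print("exact pattern:", pfmri)
    else:  # api.MATCH_FMRI
        print("fmri pattern:", pfmri)
```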
- """ + matcher = self.MATCH_FMRI - for pat in patterns: - error = None - matcher = None - npat = None - try: - parts = pat.split("@", 1) - pat_stem = parts[0] - pat_ver = None - if len(parts) > 1: - pat_ver = parts[1] - - if "*" in pat_stem or "?" in pat_stem: - matcher = self.MATCH_GLOB - elif pat_stem.startswith("pkg:/") or \ - pat_stem.startswith("/"): - matcher = self.MATCH_EXACT - else: - matcher = self.MATCH_FMRI - - if matcher == self.MATCH_GLOB: - npat = fmri.MatchingPkgFmri(pat_stem) - else: - npat = fmri.PkgFmri(pat_stem) - - if not pat_ver: - # Do nothing. - pass - elif "*" in pat_ver or "?" in pat_ver or \ - pat_ver == "latest": - npat.version = \ - pkg.version.MatchingVersion(pat_ver) - else: - npat.version = \ - pkg.version.Version(pat_ver) - - except (fmri.FmriError, pkg.version.VersionError) as e: - # Whatever the error was, return it. - error = e - yield (pat, error, npat, matcher) - - def purge_history(self): - """Deletes all client history.""" - be_name, be_uuid = bootenv.BootEnv.get_be_name(self._img.root) - self._img.history.purge(be_name=be_name, be_uuid=be_uuid) - - def update_format(self): - """Attempt to update the on-disk format of the image to the - newest version. Returns a boolean indicating whether any action - was taken.""" - - self._acquire_activity_lock() - try: - self._disable_cancel() - self._img.allow_ondisk_upgrade = True - return self._img.update_format( - progtrack=self.__progresstracker) - except apx.CanceledException as e: - self._cancel_done() - raise - finally: - self._activity_lock.release() - - def write_p5i(self, fileobj, pkg_names=None, pubs=None): - """Writes the publisher, repository, and provided package names - to the provided file-like object 'fileobj' in JSON p5i format. - - 'fileobj' is only required to have a 'write' method that accepts - data to be written as a parameter. - - 'pkg_names' is a dict of lists, tuples, or sets indexed by - publisher prefix that contain package names, FMRI strings, or - package info objects. A prefix of "" can be used for packages - that are not specific to a publisher. - - 'pubs' is an optional list of publisher prefixes or Publisher - objects. If not provided, the information for all publishers - (excluding those disabled) will be output.""" + if matcher == self.MATCH_GLOB: + npat = fmri.MatchingPkgFmri(pat_stem) + else: + npat = fmri.PkgFmri(pat_stem) - if not pubs: - plist = [ - p for p in self.get_publishers() - if not p.disabled - ] + if not pat_ver: + # Do nothing. + pass + elif "*" in pat_ver or "?" in pat_ver or pat_ver == "latest": + npat.version = pkg.version.MatchingVersion(pat_ver) else: - plist = [] - for p in pubs: - if not isinstance(p, publisher.Publisher): - plist.append(self._img.get_publisher( - prefix=p, alias=p)) - else: - plist.append(p) - - # Transform PackageInfo object entries into PkgFmri entries - # before passing them to the p5i module. 
- new_pkg_names = {} - for pub in pkg_names: - pkglist = [] - for p in pkg_names[pub]: - if isinstance(p, PackageInfo): - pkglist.append(p.fmri) - else: - pkglist.append(p) - new_pkg_names[pub] = pkglist - p5i.write(fileobj, plist, pkg_names=new_pkg_names) - - def write_syspub(self, path, prefixes, version): - """Write the syspub/version response to the provided path.""" - if version != 0: - raise apx.UnsupportedP5SVersion(version) - - pubs = [ - p for p in self.get_publishers() - if p.prefix in prefixes - ] - fd, fp = tempfile.mkstemp() - try: - fh = os.fdopen(fd, "w") - p5s.write(fh, pubs, self._img.cfg) - fh.close() - portable.rename(fp, path) - except: - if os.path.exists(fp): - portable.remove(fp) - raise + npat.version = pkg.version.Version(pat_ver) - def get_dehydrated_publishers(self): - """Return the list of dehydrated publishers' prefixes.""" + except (fmri.FmriError, pkg.version.VersionError) as e: + # Whatever the error was, return it. + error = e + yield (pat, error, npat, matcher) - return self._img.cfg.get_property("property", "dehydrated") + def purge_history(self): + """Deletes all client history.""" + be_name, be_uuid = bootenv.BootEnv.get_be_name(self._img.root) + self._img.history.purge(be_name=be_name, be_uuid=be_uuid) + def update_format(self): + """Attempt to update the on-disk format of the image to the + newest version. Returns a boolean indicating whether any action + was taken.""" -class Query(query_p.Query): - """This class is the object used to pass queries into the api functions. - It encapsulates the possible options available for a query as well as - the text of the query itself.""" - - def __init__(self, text, case_sensitive, return_actions=True, - num_to_return=None, start_point=None): - if return_actions: - return_type = query_p.Query.RETURN_ACTIONS + self._acquire_activity_lock() + try: + self._disable_cancel() + self._img.allow_ondisk_upgrade = True + return self._img.update_format(progtrack=self.__progresstracker) + except apx.CanceledException as e: + self._cancel_done() + raise + finally: + self._activity_lock.release() + + def write_p5i(self, fileobj, pkg_names=None, pubs=None): + """Writes the publisher, repository, and provided package names + to the provided file-like object 'fileobj' in JSON p5i format. + + 'fileobj' is only required to have a 'write' method that accepts + data to be written as a parameter. + + 'pkg_names' is a dict of lists, tuples, or sets indexed by + publisher prefix that contain package names, FMRI strings, or + package info objects. A prefix of "" can be used for packages + that are not specific to a publisher. + + 'pubs' is an optional list of publisher prefixes or Publisher + objects. 
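update_format(), reformatted just above, can be called unconditionally; it reports whether anything was actually migrated. A one-line sketch with the assumed `api` instance:

```python
if api.update_format():
    print("on-disk image format was upgraded")
else:
    print("image format already current")
```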
If not provided, the information for all publishers + (excluding those disabled) will be output.""" + + if not pubs: + plist = [p for p in self.get_publishers() if not p.disabled] + else: + plist = [] + for p in pubs: + if not isinstance(p, publisher.Publisher): + plist.append(self._img.get_publisher(prefix=p, alias=p)) else: - return_type = query_p.Query.RETURN_PACKAGES - try: - query_p.Query.__init__(self, text, case_sensitive, - return_type, num_to_return, start_point) - except query_p.QueryLengthExceeded as e: - raise apx.ParseError(e) - - -def get_default_image_root(orig_cwd=None): - """Returns a tuple of (root, exact_match) where 'root' is the absolute - path of the default image root based on current environment given the - client working directory and platform defaults, and 'exact_match' is a - boolean specifying how the default should be treated by ImageInterface. - Note that the root returned may not actually be the valid root of an - image; it is merely the default location a client should use when - initializing an ImageInterface (e.g. '/' is not a valid image on Solaris - 10). - - The ImageInterface object will use the root provided as a starting point - to find an image, searching upwards through each parent directory until - '/' is reached based on the value of exact_match. - - 'orig_cwd' should be the original current working directory at the time - of client startup. This value is assumed to be valid if provided, - although permission and access errors will be gracefully handled. - """ - - # If an image location wasn't explicitly specified, check $PKG_IMAGE in - # the environment. - root = os.environ.get("PKG_IMAGE") - exact_match = True - if not root: - if os.environ.get("PKG_FIND_IMAGE") or \ - portable.osname != "sunos": - # If no image location was found in the environment, - # then see if user enabled finding image or if current - # platform isn't Solaris. If so, attempt to find the - # image starting with the working directory. - root = orig_cwd - if root: - exact_match = False - if not root: - # If no image directory has been determined based on - # request or environment, default to live root. - root = misc.liveroot() - return root, exact_match - - -def image_create(pkg_client_name, version_id, root, imgtype, is_zone, - cancel_state_callable=None, facets=misc.EmptyDict, force=False, - mirrors=misc.EmptyI, origins=misc.EmptyI, prefix=None, refresh_allowed=True, - repo_uri=None, ssl_cert=None, ssl_key=None, user_provided_dir=False, - progtrack=None, variants=misc.EmptyDict, props=misc.EmptyDict, - cmdpath=None): - """Creates an image at the specified location. + plist.append(p) + + # Transform PackageInfo object entries into PkgFmri entries + # before passing them to the p5i module. + new_pkg_names = {} + for pub in pkg_names: + pkglist = [] + for p in pkg_names[pub]: + if isinstance(p, PackageInfo): + pkglist.append(p.fmri) + else: + pkglist.append(p) + new_pkg_names[pub] = pkglist + p5i.write(fileobj, plist, pkg_names=new_pkg_names) - 'pkg_client_name' is a string containing the name of the client, - such as "pkg". + def write_syspub(self, path, prefixes, version): + """Write the syspub/version response to the provided path.""" + if version != 0: + raise apx.UnsupportedP5SVersion(version) - 'version_id' indicates the version of the api the client is - expecting to use. 
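A sketch of driving write_p5i() as described in the docstring above; the publisher prefix, package name, and output path are hypothetical:

```python
# pkg_names maps publisher prefix -> package names; a "" key would hold
# packages not tied to any particular publisher.
with open("/tmp/example.p5i", "w") as f:
    api.write_p5i(
        f,
        pkg_names={"example.org": ["pkg:/web/server/nginx"]},
        pubs=["example.org"],
    )
```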
+ pubs = [p for p in self.get_publishers() if p.prefix in prefixes] + fd, fp = tempfile.mkstemp() + try: + fh = os.fdopen(fd, "w") + p5s.write(fh, pubs, self._img.cfg) + fh.close() + portable.rename(fp, path) + except: + if os.path.exists(fp): + portable.remove(fp) + raise - 'root' is the absolute path of the directory where the image will - be created. If it does not exist, it will be created. - - 'imgtype' is an IMG_TYPE constant representing the type of image - to create. - - 'is_zone' is a boolean value indicating whether the image being - created is for a zone. - - 'cancel_state_callable' is an optional function reference that will - be called if the cancellable status of an operation changes. - - 'facets' is a dictionary of facet names and values to set during - the image creation process. - - 'force' is an optional boolean value indicating that if an image - already exists at the specified 'root' that it should be overwritten. - - 'mirrors' is an optional list of URI strings that should be added to - all publishers configured during image creation as mirrors. - - 'origins' is an optional list of URI strings that should be added to - all publishers configured during image creation as origins. - - 'prefix' is an optional publisher prefix to configure as a publisher - for the new image if origins is provided, or to restrict which publisher - will be configured if 'repo_uri' is provided. If this prefix does not - match the publisher configuration retrieved from the repository, an - UnknownRepositoryPublishers exception will be raised. If not provided, - 'refresh_allowed' cannot be False. - - 'props' is an optional dictionary mapping image property names to values - to be set while creating the image. - - 'refresh_allowed' is an optional boolean value indicating whether - publisher configuration data and metadata can be retrieved during - image creation. If False, 'repo_uri' cannot be specified and - a 'prefix' must be provided. - - 'repo_uri' is an optional URI string of a package repository to - retrieve publisher configuration information from. If the target - repository supports this, all publishers found will be added to the - image and any origins or mirrors will be added to all of those - publishers. If the target repository does not support this, and a - prefix was not specified, an UnsupportedRepositoryOperation exception - will be raised. If the target repository supports the operation, but - does not provide complete configuration information, a - RepoPubConfigUnavailable exception will be raised. - - 'ssl_cert' is an optional pathname of an SSL Certificate file to - configure all publishers with and to use when retrieving publisher - configuration information. If provided, 'ssl_key' must also be - provided. The certificate file must be pem-encoded. - - 'ssl_key' is an optional pathname of an SSL Key file to configure all - publishers with and to use when retrieving publisher configuration - information. If provided, 'ssl_cert' must also be provided. The - key file must be pem-encoded. - - 'user_provided_dir' is an optional boolean value indicating that the - provided 'root' was user-supplied and that additional error handling - should be enforced. This primarily affects cases where a relative - root has been provided or the root was based on the current working - directory. - - 'progtrack' is an optional ProgressTracker object. - - 'variants' is a dictionary of variant names and values to set during - the image creation process. 
- - Callers must provide one of the following when calling this function: - * no 'prefix' and no 'origins' - * a 'prefix' and 'repo_uri' (origins and mirrors are optional) - * no 'prefix' and a 'repo_uri' (origins and mirrors are optional) - * a 'prefix' and 'origins' - """ + def get_dehydrated_publishers(self): + """Return the list of dehydrated publishers' prefixes.""" - # Caller must provide a prefix and repository, or no prefix and a - # repository, or a prefix and origins, or no prefix and no origins. - assert (prefix and repo_uri) or (not prefix and repo_uri) or (prefix and - origins or (not prefix and not origins)) + return self._img.cfg.get_property("property", "dehydrated") - # If prefix isn't provided and refresh isn't allowed, then auto-config - # cannot be done. - assert (prefix or refresh_allowed) or not repo_uri - destroy_root = False +class Query(query_p.Query): + """This class is the object used to pass queries into the api functions. + It encapsulates the possible options available for a query as well as + the text of the query itself.""" + + def __init__( + self, + text, + case_sensitive, + return_actions=True, + num_to_return=None, + start_point=None, + ): + if return_actions: + return_type = query_p.Query.RETURN_ACTIONS + else: + return_type = query_p.Query.RETURN_PACKAGES try: - destroy_root = not os.path.exists(root) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise apx.PermissionsException( - e.filename) - raise - - # The image object must be created first since transport may be - # needed to retrieve publisher configuration information. - img = image.Image(root, force=force, imgtype=imgtype, - progtrack=progtrack, should_exist=False, - user_provided_dir=user_provided_dir, cmdpath=cmdpath, - props=props) - api_inst = ImageInterface(img, version_id, - progtrack, cancel_state_callable, pkg_client_name, - cmdpath=cmdpath) + query_p.Query.__init__( + self, + text, + case_sensitive, + return_type, + num_to_return, + start_point, + ) + except query_p.QueryLengthExceeded as e: + raise apx.ParseError(e) - pubs = [] - try: - if repo_uri: - # Assume auto configuration. - if ssl_cert: - try: - misc.validate_ssl_cert( - ssl_cert, - prefix=prefix, - uri=repo_uri) - except apx.ExpiringCertificate as e: - logger.warning(e) - - repo = publisher.RepositoryURI(repo_uri, - ssl_cert=ssl_cert, ssl_key=ssl_key) - - pubs = None - try: - pubs = api_inst.get_publisherdata(repo=repo) - except apx.UnsupportedRepositoryOperation: - if not prefix: - raise apx.RepoPubConfigUnavailable( - location=repo_uri) - # For a v0 repo where a prefix was specified, - # fallback to manual configuration. - if not origins: - origins = [repo_uri] - repo_uri = None - - if not prefix and not pubs: - # Empty repository configuration. - raise apx.RepoPubConfigUnavailable( - location=repo_uri) - - if repo_uri: - for p in pubs: - psrepo = p.repository - if not psrepo: - # Repository configuration info - # was not provided, so assume - # origin is repo_uri. - p.repository = \ - publisher.Repository( - origins=[repo_uri]) - elif not psrepo.origins: - # Repository configuration was - # provided, but without an - # origin. Assume the repo_uri - # is the origin. - psrepo.add_origin(repo_uri) - elif repo not in psrepo.origins: - # If the repo_uri used is not - # in the list of sources, then - # add it as the first origin. - psrepo.origins.insert(0, repo) - - if prefix and not repo_uri: - # Auto-configuration not possible or not requested. 
- if ssl_cert: - try: - misc.validate_ssl_cert( - ssl_cert, - prefix=prefix, - uri=origins[0]) - except apx.ExpiringCertificate as e: - logger.warning(e) - - repo = publisher.Repository() - for o in origins: - repo.add_origin(o) # pylint: disable=E1103 - for m in mirrors: - repo.add_mirror(m) # pylint: disable=E1103 - pub = publisher.Publisher(prefix, - repository=repo) - pubs = [pub] - - if prefix and prefix not in pubs: - # If publisher prefix requested isn't found in the list - # of publishers at this point, then configuration isn't - # possible. - known = [p.prefix for p in pubs] - raise apx.UnknownRepositoryPublishers( - known=known, unknown=[prefix], location=repo_uri) - elif prefix: - # Filter out any publishers that weren't requested. - pubs = [ - p for p in pubs - if p.prefix == prefix - ] - - # Add additional origins and mirrors that weren't found in the - # publisher configuration if provided. - for p in pubs: - pr = p.repository - for o in origins: - if not pr.has_origin(o): - pr.add_origin(o) - for m in mirrors: - if not pr.has_mirror(m): - pr.add_mirror(m) - - # Set provided SSL Cert/Key for all configured publishers. +def get_default_image_root(orig_cwd=None): + """Returns a tuple of (root, exact_match) where 'root' is the absolute + path of the default image root based on current environment given the + client working directory and platform defaults, and 'exact_match' is a + boolean specifying how the default should be treated by ImageInterface. + Note that the root returned may not actually be the valid root of an + image; it is merely the default location a client should use when + initializing an ImageInterface (e.g. '/' is not a valid image on Solaris + 10). + + The ImageInterface object will use the root provided as a starting point + to find an image, searching upwards through each parent directory until + '/' is reached based on the value of exact_match. + + 'orig_cwd' should be the original current working directory at the time + of client startup. This value is assumed to be valid if provided, + although permission and access errors will be gracefully handled. + """ + + # If an image location wasn't explicitly specified, check $PKG_IMAGE in + # the environment. + root = os.environ.get("PKG_IMAGE") + exact_match = True + if not root: + if os.environ.get("PKG_FIND_IMAGE") or portable.osname != "sunos": + # If no image location was found in the environment, + # then see if user enabled finding image or if current + # platform isn't Solaris. If so, attempt to find the + # image starting with the working directory. + root = orig_cwd + if root: + exact_match = False + if not root: + # If no image directory has been determined based on + # request or environment, default to live root. + root = misc.liveroot() + return root, exact_match + + +def image_create( + pkg_client_name, + version_id, + root, + imgtype, + is_zone, + cancel_state_callable=None, + facets=misc.EmptyDict, + force=False, + mirrors=misc.EmptyI, + origins=misc.EmptyI, + prefix=None, + refresh_allowed=True, + repo_uri=None, + ssl_cert=None, + ssl_key=None, + user_provided_dir=False, + progtrack=None, + variants=misc.EmptyDict, + props=misc.EmptyDict, + cmdpath=None, +): + """Creates an image at the specified location. + + 'pkg_client_name' is a string containing the name of the client, + such as "pkg". + + 'version_id' indicates the version of the api the client is + expecting to use. + + 'root' is the absolute path of the directory where the image will + be created. 
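The Query wrapper completed above only validates and packages a search request, surfacing an over-long query as apx.ParseError. A construction sketch with a hypothetical search term:

```python
import pkg.client.api as api_mod
import pkg.client.api_errors as apx

try:
    # Ask for matching packages rather than individual actions.
    q = api_mod.Query("vim", case_sensitive=False, return_actions=False)
except apx.ParseError as e:
    print("query rejected:", e)
```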
If it does not exist, it will be created. + + 'imgtype' is an IMG_TYPE constant representing the type of image + to create. + + 'is_zone' is a boolean value indicating whether the image being + created is for a zone. + + 'cancel_state_callable' is an optional function reference that will + be called if the cancellable status of an operation changes. + + 'facets' is a dictionary of facet names and values to set during + the image creation process. + + 'force' is an optional boolean value indicating that if an image + already exists at the specified 'root' that it should be overwritten. + + 'mirrors' is an optional list of URI strings that should be added to + all publishers configured during image creation as mirrors. + + 'origins' is an optional list of URI strings that should be added to + all publishers configured during image creation as origins. + + 'prefix' is an optional publisher prefix to configure as a publisher + for the new image if origins is provided, or to restrict which publisher + will be configured if 'repo_uri' is provided. If this prefix does not + match the publisher configuration retrieved from the repository, an + UnknownRepositoryPublishers exception will be raised. If not provided, + 'refresh_allowed' cannot be False. + + 'props' is an optional dictionary mapping image property names to values + to be set while creating the image. + + 'refresh_allowed' is an optional boolean value indicating whether + publisher configuration data and metadata can be retrieved during + image creation. If False, 'repo_uri' cannot be specified and + a 'prefix' must be provided. + + 'repo_uri' is an optional URI string of a package repository to + retrieve publisher configuration information from. If the target + repository supports this, all publishers found will be added to the + image and any origins or mirrors will be added to all of those + publishers. If the target repository does not support this, and a + prefix was not specified, an UnsupportedRepositoryOperation exception + will be raised. If the target repository supports the operation, but + does not provide complete configuration information, a + RepoPubConfigUnavailable exception will be raised. + + 'ssl_cert' is an optional pathname of an SSL Certificate file to + configure all publishers with and to use when retrieving publisher + configuration information. If provided, 'ssl_key' must also be + provided. The certificate file must be pem-encoded. + + 'ssl_key' is an optional pathname of an SSL Key file to configure all + publishers with and to use when retrieving publisher configuration + information. If provided, 'ssl_cert' must also be provided. The + key file must be pem-encoded. + + 'user_provided_dir' is an optional boolean value indicating that the + provided 'root' was user-supplied and that additional error handling + should be enforced. This primarily affects cases where a relative + root has been provided or the root was based on the current working + directory. + + 'progtrack' is an optional ProgressTracker object. + + 'variants' is a dictionary of variant names and values to set during + the image creation process. 
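get_default_image_root(), reformatted a little further up, is how a client front end usually derives the 'root' argument discussed in this docstring. A minimal sketch:

```python
import os
import pkg.client.api as api_mod

# Checks $PKG_IMAGE first, then (on non-Solaris or with $PKG_FIND_IMAGE set)
# the working directory, and finally falls back to the live root.
root, exact_match = api_mod.get_default_image_root(orig_cwd=os.getcwd())
print(root, exact_match)
```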
+ + Callers must provide one of the following when calling this function: + * no 'prefix' and no 'origins' + * a 'prefix' and 'repo_uri' (origins and mirrors are optional) + * no 'prefix' and a 'repo_uri' (origins and mirrors are optional) + * a 'prefix' and 'origins' + """ + + # Caller must provide a prefix and repository, or no prefix and a + # repository, or a prefix and origins, or no prefix and no origins. + assert ( + (prefix and repo_uri) + or (not prefix and repo_uri) + or (prefix and origins or (not prefix and not origins)) + ) + + # If prefix isn't provided and refresh isn't allowed, then auto-config + # cannot be done. + assert (prefix or refresh_allowed) or not repo_uri + + destroy_root = False + try: + destroy_root = not os.path.exists(root) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise apx.PermissionsException(e.filename) + raise + + # The image object must be created first since transport may be + # needed to retrieve publisher configuration information. + img = image.Image( + root, + force=force, + imgtype=imgtype, + progtrack=progtrack, + should_exist=False, + user_provided_dir=user_provided_dir, + cmdpath=cmdpath, + props=props, + ) + api_inst = ImageInterface( + img, + version_id, + progtrack, + cancel_state_callable, + pkg_client_name, + cmdpath=cmdpath, + ) + + pubs = [] + + try: + if repo_uri: + # Assume auto configuration. + if ssl_cert: + try: + misc.validate_ssl_cert( + ssl_cert, prefix=prefix, uri=repo_uri + ) + except apx.ExpiringCertificate as e: + logger.warning(e) + + repo = publisher.RepositoryURI( + repo_uri, ssl_cert=ssl_cert, ssl_key=ssl_key + ) + + pubs = None + try: + pubs = api_inst.get_publisherdata(repo=repo) + except apx.UnsupportedRepositoryOperation: + if not prefix: + raise apx.RepoPubConfigUnavailable(location=repo_uri) + # For a v0 repo where a prefix was specified, + # fallback to manual configuration. + if not origins: + origins = [repo_uri] + repo_uri = None + + if not prefix and not pubs: + # Empty repository configuration. + raise apx.RepoPubConfigUnavailable(location=repo_uri) + + if repo_uri: for p in pubs: - repo = p.repository - for o in repo.origins: - if o.scheme not in publisher.SSL_SCHEMES: - continue - o.ssl_cert = ssl_cert - o.ssl_key = ssl_key - for m in repo.mirrors: - if m.scheme not in publisher.SSL_SCHEMES: - continue - m.ssl_cert = ssl_cert - m.ssl_key = ssl_key - - img.create(pubs, facets=facets, is_zone=is_zone, - progtrack=progtrack, refresh_allowed=refresh_allowed, - variants=variants, props=props) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise apx.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise apx.ReadOnlyFileSystemException( - e.filename) - raise - except: - # Ensure a defunct image isn't left behind. - img.destroy() - if destroy_root and \ - os.path.abspath(root) != "/" and \ - os.path.exists(root): - # Root didn't exist before create and isn't '/', - # so remove it. - shutil.rmtree(root, True) - raise - - img.cleanup_downloads() + psrepo = p.repository + if not psrepo: + # Repository configuration info + # was not provided, so assume + # origin is repo_uri. + p.repository = publisher.Repository(origins=[repo_uri]) + elif not psrepo.origins: + # Repository configuration was + # provided, but without an + # origin. Assume the repo_uri + # is the origin. + psrepo.add_origin(repo_uri) + elif repo not in psrepo.origins: + # If the repo_uri used is not + # in the list of sources, then + # add it as the first origin. 
+ psrepo.origins.insert(0, repo) + + if prefix and not repo_uri: + # Auto-configuration not possible or not requested. + if ssl_cert: + try: + misc.validate_ssl_cert( + ssl_cert, prefix=prefix, uri=origins[0] + ) + except apx.ExpiringCertificate as e: + logger.warning(e) + + repo = publisher.Repository() + for o in origins: + repo.add_origin(o) # pylint: disable=E1103 + for m in mirrors: + repo.add_mirror(m) # pylint: disable=E1103 + pub = publisher.Publisher(prefix, repository=repo) + pubs = [pub] + + if prefix and prefix not in pubs: + # If publisher prefix requested isn't found in the list + # of publishers at this point, then configuration isn't + # possible. + known = [p.prefix for p in pubs] + raise apx.UnknownRepositoryPublishers( + known=known, unknown=[prefix], location=repo_uri + ) + elif prefix: + # Filter out any publishers that weren't requested. + pubs = [p for p in pubs if p.prefix == prefix] + + # Add additional origins and mirrors that weren't found in the + # publisher configuration if provided. + for p in pubs: + pr = p.repository + for o in origins: + if not pr.has_origin(o): + pr.add_origin(o) + for m in mirrors: + if not pr.has_mirror(m): + pr.add_mirror(m) + + # Set provided SSL Cert/Key for all configured publishers. + for p in pubs: + repo = p.repository + for o in repo.origins: + if o.scheme not in publisher.SSL_SCHEMES: + continue + o.ssl_cert = ssl_cert + o.ssl_key = ssl_key + for m in repo.mirrors: + if m.scheme not in publisher.SSL_SCHEMES: + continue + m.ssl_cert = ssl_cert + m.ssl_key = ssl_key + + img.create( + pubs, + facets=facets, + is_zone=is_zone, + progtrack=progtrack, + refresh_allowed=refresh_allowed, + variants=variants, + props=props, + ) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise apx.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise apx.ReadOnlyFileSystemException(e.filename) + raise + except: + # Ensure a defunct image isn't left behind. + img.destroy() + if ( + destroy_root + and os.path.abspath(root) != "/" + and os.path.exists(root) + ): + # Root didn't exist before create and isn't '/', + # so remove it. + shutil.rmtree(root, True) + raise + + img.cleanup_downloads() + + return api_inst - return api_inst # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/api_errors.py b/src/modules/client/api_errors.py index abd53499d..25e726181 100644 --- a/src/modules/client/api_errors.py +++ b/src/modules/client/api_errors.py @@ -40,3216 +40,3608 @@ # dependency. 
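Before the api_errors.py hunk continues: a sketch of how image_create(), reformatted above, is typically invoked. Every value is hypothetical, the `img_type` placeholder stands in for one of the IMG_TYPE constants mentioned in the docstring, and CURRENT_API_VERSION is assumed to be exported by pkg.client.api:

```python
import pkg.client.api as api_mod

img_type = 0  # placeholder for an IMG_TYPE constant
api_inst = api_mod.image_create(
    pkg_client_name="pkg",
    version_id=api_mod.CURRENT_API_VERSION,  # assumed module constant
    root="/tmp/test-image",
    imgtype=img_type,
    is_zone=False,
    prefix="example.org",
    origins=["https://pkg.example.org/"],
)
```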
EmptyI = tuple() + class ApiException(Exception): - def __init__(self, *args): - Exception.__init__(self) - self.__verbose_info = [] + def __init__(self, *args): + Exception.__init__(self) + self.__verbose_info = [] + + def add_verbose_info(self, info): + self.__verbose_info.extend(info) - def add_verbose_info(self, info): - self.__verbose_info.extend(info) + @property + def verbose_info(self): + return self.__verbose_info - @property - def verbose_info(self): - return self.__verbose_info class SuidUnsupportedError(ApiException): - def __str__(self): - return _(""" -The pkg client api module can not be invoked from an setuid executable.""") + def __str__(self): + return _( + """ +The pkg client api module can not be invoked from an setuid executable.""" + ) class HistoryException(ApiException): - """Private base exception class for all History exceptions.""" + """Private base exception class for all History exceptions.""" - def __init__(self, *args): - Exception.__init__(self, *args) - self.error = args[0] + def __init__(self, *args): + Exception.__init__(self, *args) + self.error = args[0] - def __str__(self): - return str(self.error) + def __str__(self): + return str(self.error) class HistoryLoadException(HistoryException): - """Used to indicate that an unexpected error occurred while loading - History operation information. + """Used to indicate that an unexpected error occurred while loading + History operation information. + + The first argument should be an exception object related to the + error encountered. + """ - The first argument should be an exception object related to the - error encountered. - """ - def __init__(self, *args): - HistoryException.__init__(self, *args) - self.parse_failure = isinstance(self.error, expat.ExpatError) + def __init__(self, *args): + HistoryException.__init__(self, *args) + self.parse_failure = isinstance(self.error, expat.ExpatError) class HistoryRequestException(HistoryException): - """Used to indicate that invalid time / range values were provided to - history API functions.""" - pass + """Used to indicate that invalid time / range values were provided to + history API functions.""" + + pass class HistoryStoreException(HistoryException): - """Used to indicate that an unexpected error occurred while storing - History operation information. + """Used to indicate that an unexpected error occurred while storing + History operation information. + + The first argument should be an exception object related to the + error encountered. + """ - The first argument should be an exception object related to the - error encountered. - """ - pass + pass class HistoryPurgeException(HistoryException): - """Used to indicate that an unexpected error occurred while purging - History operation information. + """Used to indicate that an unexpected error occurred while purging + History operation information. + + The first argument should be an exception object related to the + error encountered. + """ + + pass - The first argument should be an exception object related to the - error encountered. 
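A standalone illustration of the verbose-info mechanism reformatted above: callers may attach extra diagnostic lines to any ApiException and read them back through the property:

```python
import pkg.client.api_errors as apx

err = apx.ApiException()
err.add_verbose_info(["first detail", "second detail"])
print(err.verbose_info)  # ['first detail', 'second detail']
```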
- """ - pass class ImageLockedError(ApiException): - """Used to indicate that the image is currently locked by another thread - or process and cannot be modified.""" - - def __init__(self, hostname=None, pid=None, pid_name=None): - ApiException.__init__(self) - self.hostname = hostname - self.pid = pid - self.pid_name = pid_name - - def __str__(self): - if self.pid is not None and self.pid_name is not None and \ - self.hostname is not None: - return _("The image cannot be modified as it is " - "currently in use by another package client: " - "{pid_name} on {host}, pid {pid}.").format( - pid_name=self.pid_name, pid=self.pid, - host=self.hostname) - if self.pid is not None and self.pid_name is not None: - return _("The image cannot be modified as it is " - "currently in use by another package client: " - "{pid_name} on an unknown host, pid {pid}.").format( - pid_name=self.pid_name, pid=self.pid) - elif self.pid is not None: - return _("The image cannot be modified as it is " - "currently in use by another package client: " - "pid {pid} on {host}.").format( - pid=self.pid, host=self.hostname) - return _("The image cannot be modified as it is currently " - "in use by another package client.") + """Used to indicate that the image is currently locked by another thread + or process and cannot be modified.""" + + def __init__(self, hostname=None, pid=None, pid_name=None): + ApiException.__init__(self) + self.hostname = hostname + self.pid = pid + self.pid_name = pid_name + + def __str__(self): + if ( + self.pid is not None + and self.pid_name is not None + and self.hostname is not None + ): + return _( + "The image cannot be modified as it is " + "currently in use by another package client: " + "{pid_name} on {host}, pid {pid}." + ).format(pid_name=self.pid_name, pid=self.pid, host=self.hostname) + if self.pid is not None and self.pid_name is not None: + return _( + "The image cannot be modified as it is " + "currently in use by another package client: " + "{pid_name} on an unknown host, pid {pid}." + ).format(pid_name=self.pid_name, pid=self.pid) + elif self.pid is not None: + return _( + "The image cannot be modified as it is " + "currently in use by another package client: " + "pid {pid} on {host}." + ).format(pid=self.pid, host=self.hostname) + return _( + "The image cannot be modified as it is currently " + "in use by another package client." 
+ ) + class ImageLockingFailedError(ApiException): - """Used to indicate that the image could not be locked.""" + """Used to indicate that the image could not be locked.""" + + def __init__(self, root_dir, err): + ApiException.__init__(self) + self.root_dir = root_dir + self.err = err - def __init__(self, root_dir, err): - ApiException.__init__(self) - self.root_dir = root_dir - self.err = err + def __str__(self): + return _("Failed to lock the image rooted at {0}: {1}").format( + self.root_dir, self.err + ) - def __str__(self): - return _("Failed to lock the image rooted at {0}: {1}").format( - self.root_dir, self.err) class ImageNotFoundException(ApiException): - """Used when an image was not found""" - def __init__(self, user_specified, user_dir, root_dir): - ApiException.__init__(self) - self.user_specified = user_specified - self.user_dir = user_dir - self.root_dir = root_dir + """Used when an image was not found""" + + def __init__(self, user_specified, user_dir, root_dir): + ApiException.__init__(self) + self.user_specified = user_specified + self.user_dir = user_dir + self.root_dir = root_dir + + def __str__(self): + return _("No image rooted at '{0}'").format(self.user_dir) - def __str__(self): - return _("No image rooted at '{0}'").format(self.user_dir) class ImageMissingKeyFile(ApiException): - """Used when an image does not contain all expected key files""" - def __init__(self, keyfile): - ApiException.__init__(self) - self.keyfile = keyfile + """Used when an image does not contain all expected key files""" + + def __init__(self, keyfile): + ApiException.__init__(self) + self.keyfile = keyfile + + def __str__(self): + return _( + "Image is missing key file. " "Is everything mounted? '{0}'" + ).format(self.keyfile) - def __str__(self): - return _("Image is missing key file. " - "Is everything mounted? '{0}'").format(self.keyfile) class ImageFormatUpdateNeeded(ApiException): - """Used to indicate that an image cannot be used until its format is - updated.""" + """Used to indicate that an image cannot be used until its format is + updated.""" + + def __init__(self, path): + ApiException.__init__(self) + self.path = path - def __init__(self, path): - ApiException.__init__(self) - self.path = path + def __str__(self): + return _( + "The image rooted at {0} is written in an older format " + "and must be updated before the requested operation can be " + "performed." 
+ ).format(self.path) - def __str__(self): - return _("The image rooted at {0} is written in an older format " - "and must be updated before the requested operation can be " - "performed.").format(self.path) class ImageInsufficentSpace(ApiException): - """Used when insuffcient space exists for proposed operation""" - def __init__(self, needed, avail, use): - self.needed = needed - self.avail = avail - self.use = use - - def __str__(self): - from pkg.misc import bytes_to_str - return _("Insufficient disk space available ({avail}) " - "for estimated need ({needed}) for {use}").format( - avail=bytes_to_str(self.avail), - needed=bytes_to_str(self.needed), - use=self.use - ) + """Used when insuffcient space exists for proposed operation""" + + def __init__(self, needed, avail, use): + self.needed = needed + self.avail = avail + self.use = use + + def __str__(self): + from pkg.misc import bytes_to_str + + return _( + "Insufficient disk space available ({avail}) " + "for estimated need ({needed}) for {use}" + ).format( + avail=bytes_to_str(self.avail), + needed=bytes_to_str(self.needed), + use=self.use, + ) class VersionException(ApiException): - def __init__(self, expected_version, received_version): - ApiException.__init__(self) - self.expected_version = expected_version - self.received_version = received_version + def __init__(self, expected_version, received_version): + ApiException.__init__(self) + self.expected_version = expected_version + self.received_version = received_version class PlanExistsException(ApiException): - def __init__(self, plan_type): - ApiException.__init__(self) - self.plan_type = plan_type + def __init__(self, plan_type): + ApiException.__init__(self) + self.plan_type = plan_type class PlanPrepareException(ApiException): - """Base exception class for plan preparation errors.""" - pass + """Base exception class for plan preparation errors.""" + + pass class InvalidPackageErrors(ApiException): - """Used to indicate that the requested operation could not be completed - as one or more packages contained invalid metadata.""" + """Used to indicate that the requested operation could not be completed + as one or more packages contained invalid metadata.""" - def __init__(self, errors): - """'errors' should be a list of exceptions or strings - indicating what packages had errors and why.""" + def __init__(self, errors): + """'errors' should be a list of exceptions or strings + indicating what packages had errors and why.""" - ApiException.__init__(self) - self.errors = errors + ApiException.__init__(self) + self.errors = errors + + def __str__(self): + return _( + "The requested operation cannot be completed due " + "to invalid package metadata. Details follow:\n\n" + "{0}" + ).format("\n".join(str(e) for e in self.errors)) - def __str__(self): - return _("The requested operation cannot be completed due " - "to invalid package metadata. 
Details follow:\n\n" - "{0}").format("\n".join(str(e) for e in self.errors)) class PlanExclusionError(ApiException): - """Used to indicate that the requested operation could not be executed - due to exclusions configured on the image.""" + """Used to indicate that the requested operation could not be executed + due to exclusions configured on the image.""" - def __init__(self, paths): - self.paths = paths + def __init__(self, paths): + self.paths = paths - def __str__(self): - return "{0}\n\n {1}\n\n{2}".format( - _("""\ + def __str__(self): + return "{0}\n\n {1}\n\n{2}".format( + _( + """\ The files listed below match exclusions which are configured -on this image and can therefore not be installed:"""), - "\n ".join(list(self.paths)), - _("""\ +on this image and can therefore not be installed:""" + ), + "\n ".join(list(self.paths)), + _( + """\ See the 'exclude-patterns' and 'exclude-policy' image properties in pkg(1) for -more information.""") - ) +more information.""" + ), + ) + class LicenseAcceptanceError(ApiException): - """Used to indicate that license-related errors occurred during - plan evaluation or execution.""" + """Used to indicate that license-related errors occurred during + plan evaluation or execution.""" - def __init__(self, pfmri, src=None, dest=None, accepted=None, - displayed=None): - ApiException.__init__(self) - self.fmri = pfmri - self.src = src - self.dest = dest - self.accepted = accepted - self.displayed = displayed + def __init__( + self, pfmri, src=None, dest=None, accepted=None, displayed=None + ): + ApiException.__init__(self) + self.fmri = pfmri + self.src = src + self.dest = dest + self.accepted = accepted + self.displayed = displayed class PkgLicenseErrors(PlanPrepareException): - """Used to indicate that plan evaluation or execution failed due - to license-related errors for a package.""" + """Used to indicate that plan evaluation or execution failed due + to license-related errors for a package.""" - def __init__(self, errors): - """'errors' should be a list of LicenseAcceptanceError - exceptions.""" + def __init__(self, errors): + """'errors' should be a list of LicenseAcceptanceError + exceptions.""" - PlanPrepareException.__init__(self) - self.__errors = errors + PlanPrepareException.__init__(self) + self.__errors = errors - @property - def errors(self): - """A list of LicenseAcceptanceError exceptions.""" - return self.__errors + @property + def errors(self): + """A list of LicenseAcceptanceError exceptions.""" + return self.__errors class PlanLicenseErrors(PlanPrepareException): - """Used to indicate that image plan evaluation or execution failed due - to license-related errors.""" - - def __init__(self, pp_errors): - """'errors' should be a list of PkgLicenseErrors exceptions.""" - - PlanPrepareException.__init__(self) - self.__errors = pkgs = {} - for pp_err in pp_errors: - for e in pp_err.errors: - pkgs.setdefault(str(e.fmri), []).append(e) - - @property - def errors(self): - """Returns a dictionary indexed by package FMRI string of - lists of LicenseAcceptanceError exceptions.""" - - return self.__errors - - def __str__(self): - """Returns a string representation of the license errors.""" - - output = "" - for sfmri in self.__errors: - output += ("-" * 40) + "\n" - output += _("Package: {0}\n\n").format(sfmri) - for e in self.__errors[sfmri]: - lic_name = e.dest.attrs["license"] - output += _("License: {0}\n").format(lic_name) - if e.dest.must_accept and not e.accepted: - output += _(" License requires " - "acceptance.") - if 
e.dest.must_display and not e.displayed: - output += _(" License must be viewed.") - output += "\n" - return output + """Used to indicate that image plan evaluation or execution failed due + to license-related errors.""" + + def __init__(self, pp_errors): + """'errors' should be a list of PkgLicenseErrors exceptions.""" + + PlanPrepareException.__init__(self) + self.__errors = pkgs = {} + for pp_err in pp_errors: + for e in pp_err.errors: + pkgs.setdefault(str(e.fmri), []).append(e) + + @property + def errors(self): + """Returns a dictionary indexed by package FMRI string of + lists of LicenseAcceptanceError exceptions.""" + + return self.__errors + + def __str__(self): + """Returns a string representation of the license errors.""" + + output = "" + for sfmri in self.__errors: + output += ("-" * 40) + "\n" + output += _("Package: {0}\n\n").format(sfmri) + for e in self.__errors[sfmri]: + lic_name = e.dest.attrs["license"] + output += _("License: {0}\n").format(lic_name) + if e.dest.must_accept and not e.accepted: + output += _(" License requires " "acceptance.") + if e.dest.must_display and not e.displayed: + output += _(" License must be viewed.") + output += "\n" + return output class InvalidVarcetNames(PlanPrepareException): - """Used to indicate that image plan evaluation or execution failed due - to illegal characters in variant/facet names.""" + """Used to indicate that image plan evaluation or execution failed due + to illegal characters in variant/facet names.""" - def __init__(self, invalid_names): - PlanPrepareException.__init__(self) - self.names = invalid_names + def __init__(self, invalid_names): + PlanPrepareException.__init__(self) + self.names = invalid_names - def __str__(self): - return _(", ".join(self.names) + " are not valid variant/facet " - "names; variant/facet names cannot contain whitespace.") + def __str__(self): + return _( + ", ".join(self.names) + " are not valid variant/facet " + "names; variant/facet names cannot contain whitespace." + ) class ActuatorException(ApiException): - def __init__(self, e): - ApiException.__init__(self) - self.exception = e + def __init__(self, e): + ApiException.__init__(self) + self.exception = e - def __str__(self): - return str(self.exception) + def __str__(self): + return str(self.exception) class PrematureExecutionException(ApiException): - pass + pass class AlreadyPreparedException(PlanPrepareException): - pass + pass class AlreadyExecutedException(ApiException): - pass + pass class ImageplanStateException(ApiException): - def __init__(self, state): - ApiException.__init__(self) - self.state = state + def __init__(self, state): + ApiException.__init__(self) + self.state = state class InvalidPlanError(ApiException): - """Used to indicate that the image plan is no longer valid, likely as a - result of an image state change since the plan was created.""" + """Used to indicate that the image plan is no longer valid, likely as a + result of an image state change since the plan was created.""" - def __str__(self): - return _("The plan for the current operation is no longer " - "valid. The image has likely been modified by another " - "process or client. Please try the operation again.") + def __str__(self): + return _( + "The plan for the current operation is no longer " + "valid. The image has likely been modified by another " + "process or client. Please try the operation again." 
+ ) class ImagePkgStateError(ApiException): + def __init__(self, fmri, states): + ApiException.__init__(self) + self.fmri = fmri + self.states = states - def __init__(self, fmri, states): - ApiException.__init__(self) - self.fmri = fmri - self.states = states - - def __str__(self): - return _("Invalid package state change attempted '{states}' " - "for package '{fmri}'.").format(states=self.states, - fmri=self.fmri) + def __str__(self): + return _( + "Invalid package state change attempted '{states}' " + "for package '{fmri}'." + ).format(states=self.states, fmri=self.fmri) class IpkgOutOfDateException(ApiException): - def __str__(self): - return _("pkg(7) out of date") + def __str__(self): + return _("pkg(7) out of date") class ImageUpdateOnLiveImageException(ApiException): - def __str__(self): - return _("Requested operation cannot be performed " - "in live image.") + def __str__(self): + return _("Requested operation cannot be performed " "in live image.") class RebootNeededOnLiveImageException(ApiException): - def __str__(self): - return _("Requested operation cannot be performed " - "in live image.") + def __str__(self): + return _("Requested operation cannot be performed " "in live image.") class CanceledException(ApiException): - pass + pass + class PlanMissingException(ApiException): - pass + pass + class NoPackagesInstalledException(ApiException): - pass + pass + class PermissionsException(ApiException): - def __init__(self, path): - ApiException.__init__(self) - self.path = path - - def __str__(self): - if self.path: - return _("Could not operate on {0}\nbecause of " - "insufficient permissions. Please try the " - "command again as a privileged user.").format( - self.path) - else: - return _(""" + def __init__(self, path): + ApiException.__init__(self) + self.path = path + + def __str__(self): + if self.path: + return _( + "Could not operate on {0}\nbecause of " + "insufficient permissions. Please try the " + "command again as a privileged user." + ).format(self.path) + else: + return _( + """ Could not complete the operation because of insufficient permissions. Please try the command again as a privileged user. -""") +""" + ) + class FileInUseException(PermissionsException): - def __init__(self, path): - PermissionsException.__init__(self, path) - assert path + def __init__(self, path): + PermissionsException.__init__(self, path) + assert path - def __str__(self): - return _("Could not operate on {0}\nbecause the file is " - "in use. Please stop using the file and try the\n" - "operation again.").format(self.path) + def __str__(self): + return _( + "Could not operate on {0}\nbecause the file is " + "in use. Please stop using the file and try the\n" + "operation again." + ).format(self.path) class UnprivilegedUserError(PermissionsException): - def __init__(self, path): - PermissionsException.__init__(self, path) + def __init__(self, path): + PermissionsException.__init__(self, path) - def __str__(self): - return _("Insufficient access to complete the requested " - "operation.\nPlease try the operation again as a " - "privileged user.") + def __str__(self): + return _( + "Insufficient access to complete the requested " + "operation.\nPlease try the operation again as a " + "privileged user." 
+ ) class ReadOnlyFileSystemException(PermissionsException): - """Used to indicate that the operation was attempted on a - read-only filesystem""" + """Used to indicate that the operation was attempted on a + read-only filesystem""" - def __init__(self, path): - ApiException.__init__(self) - self.path = path + def __init__(self, path): + ApiException.__init__(self) + self.path = path - def __str__(self): - if self.path: - return _("Could not complete the operation on {0}: " - "read-only filesystem.").format(self.path) - return _("Could not complete the operation: read-only " - "filesystem.") + def __str__(self): + if self.path: + return _( + "Could not complete the operation on {0}: " + "read-only filesystem." + ).format(self.path) + return _("Could not complete the operation: read-only " "filesystem.") class InvalidLockException(ApiException): - def __init__(self, path): - ApiException.__init__(self) - self.path = path + def __init__(self, path): + ApiException.__init__(self) + self.path = path - def __str__(self): - return _("Unable to obtain or operate on lock at {0}.\n" - "Please try the operation again as a privileged " - "user.").format(self.path) + def __str__(self): + return _( + "Unable to obtain or operate on lock at {0}.\n" + "Please try the operation again as a privileged " + "user." + ).format(self.path) class PackageMatchErrors(ApiException): - """Used to indicate which patterns were not matched or illegal during - a package name matching operation.""" - - def __init__(self, unmatched_fmris=EmptyI, multiple_matches=EmptyI, - illegal=EmptyI, multispec=EmptyI): - ApiException.__init__(self) - self.unmatched_fmris = unmatched_fmris - self.multiple_matches = multiple_matches - self.illegal = illegal - self.multispec = multispec - - def __str__(self): - res = [] - if self.unmatched_fmris: - s = _("The following pattern(s) did not match any " - "packages:") - - res += [s] - res += ["\t{0}".format(p) for p in self.unmatched_fmris] - - if self.multiple_matches: - s = _("'{0}' matches multiple packages") - for p, lst in self.multiple_matches: - res.append(s.format(p)) - for pfmri in lst: - res.append("\t{0}".format(pfmri)) - - if self.illegal: - s = _("'{0}' is an illegal FMRI") - res += [ s.format(p) for p in self.illegal ] - - if self.multispec: - s = _("The following different patterns specify the " - "same package(s):") - res += [s] - for t in self.multispec: - res += [ - ", ".join([t[i] for i in range(1, len(t))]) - + ": {0}".format(t[0]) - ] - - return "\n".join(res) + """Used to indicate which patterns were not matched or illegal during + a package name matching operation.""" + + def __init__( + self, + unmatched_fmris=EmptyI, + multiple_matches=EmptyI, + illegal=EmptyI, + multispec=EmptyI, + ): + ApiException.__init__(self) + self.unmatched_fmris = unmatched_fmris + self.multiple_matches = multiple_matches + self.illegal = illegal + self.multispec = multispec + + def __str__(self): + res = [] + if self.unmatched_fmris: + s = _("The following pattern(s) did not match any " "packages:") + + res += [s] + res += ["\t{0}".format(p) for p in self.unmatched_fmris] + + if self.multiple_matches: + s = _("'{0}' matches multiple packages") + for p, lst in self.multiple_matches: + res.append(s.format(p)) + for pfmri in lst: + res.append("\t{0}".format(pfmri)) + + if self.illegal: + s = _("'{0}' is an illegal FMRI") + res += [s.format(p) for p in self.illegal] + + if self.multispec: + s = _( + "The following different patterns specify the " + "same package(s):" + ) + res += [s] + for t in 
self.multispec: + res += [ + ", ".join([t[i] for i in range(1, len(t))]) + + ": {0}".format(t[0]) + ] + + return "\n".join(res) class PlanExecutionError(InvalidPlanError): - """Used to indicate that the requested operation could not be executed - due to unexpected changes in image state after planning was completed. - """ + """Used to indicate that the requested operation could not be executed + due to unexpected changes in image state after planning was completed. + """ - def __init__(self, paths): - self.paths = paths + def __init__(self, paths): + self.paths = paths - def __str__(self): - return _("The files listed below were modified after operation " - "planning was complete or were missing during plan " - "execution; this may indicate an administrative issue or " - "system configuration issue:\n{0}".format( - "\n".join(list(self.paths)))) + def __str__(self): + return _( + "The files listed below were modified after operation " + "planning was complete or were missing during plan " + "execution; this may indicate an administrative issue or " + "system configuration issue:\n{0}".format( + "\n".join(list(self.paths)) + ) + ) class PlanCreationException(ApiException): - def __init__(self, - already_installed=EmptyI, - badarch=EmptyI, - illegal=EmptyI, - installed=EmptyI, - invalid_mediations=EmptyI, - linked_pub_error=EmptyI, - missing_dependency=EmptyI, - missing_matches=EmptyI, - multiple_matches=EmptyI, - multispec=EmptyI, - no_solution=False, - no_tmp_origins=False, - no_version=EmptyI, - not_avoided=EmptyI, - nofiles=EmptyI, - obsolete=EmptyI, - pkg_updates_required=EmptyI, - rejected_pats=EmptyI, - solver_errors=EmptyI, - no_repo_pubs=EmptyI, - unmatched_fmris=EmptyI, - would_install=EmptyI, - wrong_publishers=EmptyI, - wrong_variants=EmptyI): - - ApiException.__init__(self) - self.already_installed = already_installed - self.badarch = badarch - self.illegal = illegal - self.installed = installed - self.invalid_mediations = invalid_mediations - self.linked_pub_error = linked_pub_error - self.missing_dependency = missing_dependency - self.missing_matches = missing_matches - self.multiple_matches = multiple_matches - self.multispec = multispec - self.no_solution = no_solution - self.no_tmp_origins = no_tmp_origins - self.no_version = no_version - self.not_avoided = not_avoided - self.nofiles = nofiles - self.obsolete = obsolete - self.pkg_updates_required = pkg_updates_required - self.rejected_pats = rejected_pats - self.solver_errors = solver_errors - self.unmatched_fmris = unmatched_fmris - self.no_repo_pubs = no_repo_pubs - self.would_install = would_install - self.wrong_publishers = wrong_publishers - self.wrong_variants = wrong_variants - - def __str__(self): - res = [] - if self.unmatched_fmris: - s = _("""\ + def __init__( + self, + already_installed=EmptyI, + badarch=EmptyI, + illegal=EmptyI, + installed=EmptyI, + invalid_mediations=EmptyI, + linked_pub_error=EmptyI, + missing_dependency=EmptyI, + missing_matches=EmptyI, + multiple_matches=EmptyI, + multispec=EmptyI, + no_solution=False, + no_tmp_origins=False, + no_version=EmptyI, + not_avoided=EmptyI, + nofiles=EmptyI, + obsolete=EmptyI, + pkg_updates_required=EmptyI, + rejected_pats=EmptyI, + solver_errors=EmptyI, + no_repo_pubs=EmptyI, + unmatched_fmris=EmptyI, + would_install=EmptyI, + wrong_publishers=EmptyI, + wrong_variants=EmptyI, + ): + ApiException.__init__(self) + self.already_installed = already_installed + self.badarch = badarch + self.illegal = illegal + self.installed = installed + self.invalid_mediations = 
invalid_mediations + self.linked_pub_error = linked_pub_error + self.missing_dependency = missing_dependency + self.missing_matches = missing_matches + self.multiple_matches = multiple_matches + self.multispec = multispec + self.no_solution = no_solution + self.no_tmp_origins = no_tmp_origins + self.no_version = no_version + self.not_avoided = not_avoided + self.nofiles = nofiles + self.obsolete = obsolete + self.pkg_updates_required = pkg_updates_required + self.rejected_pats = rejected_pats + self.solver_errors = solver_errors + self.unmatched_fmris = unmatched_fmris + self.no_repo_pubs = no_repo_pubs + self.would_install = would_install + self.wrong_publishers = wrong_publishers + self.wrong_variants = wrong_variants + + def __str__(self): + res = [] + if self.unmatched_fmris: + s = _( + """\ The following pattern(s) did not match any allowable packages. Try using a different matching pattern, or refreshing publisher information: -""") - res += [s] - res += ["\t{0}".format(p) for p in self.unmatched_fmris] +""" + ) + res += [s] + res += ["\t{0}".format(p) for p in self.unmatched_fmris] - if self.rejected_pats: - s = _("""\ + if self.rejected_pats: + s = _( + """\ The following pattern(s) only matched packages rejected by user request. Try using a different matching pattern, or refreshing publisher information: -""") - res += [s] - res += ["\t{0}".format(p) for p in self.rejected_pats] +""" + ) + res += [s] + res += ["\t{0}".format(p) for p in self.rejected_pats] - if self.wrong_variants: - s = _("""\ + if self.wrong_variants: + s = _( + """\ The following pattern(s) only matched packages that are not available -for the current image's architecture, zone type, and/or other variant:""") - res += [s] - res += ["\t{0}".format(p) for p in self.wrong_variants] - - if self.wrong_publishers: - s = _("The following patterns only matched packages " - "that are from publishers other than that which " - "supplied the already installed version of this package") - res += [s] - res += ["\t{0}: {1}".format(p[0], ", ".join(p[1])) for p in self.wrong_publishers] - - if self.multiple_matches: - s = _("'{0}' matches multiple packages") - for p, lst in self.multiple_matches: - res.append(s.format(p)) - for pfmri in lst: - res.append("\t{0}".format(pfmri)) - - if self.missing_matches: - s = _("'{0}' matches no installed packages") - res += [ s.format(p) for p in self.missing_matches ] - - if self.illegal: - s = _("'{0}' is an illegal fmri") - res += [ s.format(p) for p in self.illegal ] - - if self.badarch: - s = _("'{p}' supports the following architectures: " - "{archs}") - a = _("Image architecture is defined as: {0}") - res += [ s.format(p=self.badarch[0], - archs=", ".join(self.badarch[1]))] - res += [ a.format(self.badarch[2])] - - s = _("'{p}' depends on obsolete package '{op}'") - res += [ s.format(p=p, op=op) for p, op in self.obsolete ] - - if self.installed: - s = _("The proposed operation can not be performed for " - "the following package(s) as they are already " - "installed: ") - res += [s] - res += ["\t{0}".format(p) for p in self.installed] - - if self.invalid_mediations: - s = _("The following mediations are not syntactically " - "valid:") - for m, entries in six.iteritems(self.invalid_mediations): - for value, error in entries.values(): - res.append(error) - - if self.multispec: - s = _("The following patterns specify different " - "versions of the same package(s):") - res += [s] - for t in self.multispec: - res += [ - ", ".join( - [t[i] for i in range(1, len(t))]) - + ": {0}".format(t[0]) 
- ] - if self.no_solution: - res += [_("No solution was found to satisfy constraints")] - if isinstance(self.no_solution, list): - res.extend(self.no_solution) - - if self.pkg_updates_required: - s = _("""\ +for the current image's architecture, zone type, and/or other variant:""" + ) + res += [s] + res += ["\t{0}".format(p) for p in self.wrong_variants] + + if self.wrong_publishers: + s = _( + "The following patterns only matched packages " + "that are from publishers other than that which " + "supplied the already installed version of this package" + ) + res += [s] + res += [ + "\t{0}: {1}".format(p[0], ", ".join(p[1])) + for p in self.wrong_publishers + ] + + if self.multiple_matches: + s = _("'{0}' matches multiple packages") + for p, lst in self.multiple_matches: + res.append(s.format(p)) + for pfmri in lst: + res.append("\t{0}".format(pfmri)) + + if self.missing_matches: + s = _("'{0}' matches no installed packages") + res += [s.format(p) for p in self.missing_matches] + + if self.illegal: + s = _("'{0}' is an illegal fmri") + res += [s.format(p) for p in self.illegal] + + if self.badarch: + s = _("'{p}' supports the following architectures: " "{archs}") + a = _("Image architecture is defined as: {0}") + res += [ + s.format(p=self.badarch[0], archs=", ".join(self.badarch[1])) + ] + res += [a.format(self.badarch[2])] + + s = _("'{p}' depends on obsolete package '{op}'") + res += [s.format(p=p, op=op) for p, op in self.obsolete] + + if self.installed: + s = _( + "The proposed operation can not be performed for " + "the following package(s) as they are already " + "installed: " + ) + res += [s] + res += ["\t{0}".format(p) for p in self.installed] + + if self.invalid_mediations: + s = _("The following mediations are not syntactically " "valid:") + for m, entries in six.iteritems(self.invalid_mediations): + for value, error in entries.values(): + res.append(error) + + if self.multispec: + s = _( + "The following patterns specify different " + "versions of the same package(s):" + ) + res += [s] + for t in self.multispec: + res += [ + ", ".join([t[i] for i in range(1, len(t))]) + + ": {0}".format(t[0]) + ] + if self.no_solution: + res += [_("No solution was found to satisfy constraints")] + if isinstance(self.no_solution, list): + res.extend(self.no_solution) + + if self.pkg_updates_required: + s = _( + """\ Syncing this linked image would require the following package updates: -""") - res += [s] - for (oldfmri, newfmri) in self.pkg_updates_required: - res += ["{oldfmri} -> {newfmri}\n".format( - oldfmri=oldfmri, newfmri=newfmri)] - - if self.no_version: - res += self.no_version - - if self.no_tmp_origins: - s = _(""" +""" + ) + res += [s] + for oldfmri, newfmri in self.pkg_updates_required: + res += [ + "{oldfmri} -> {newfmri}\n".format( + oldfmri=oldfmri, newfmri=newfmri + ) + ] + + if self.no_version: + res += self.no_version + + if self.no_tmp_origins: + s = _( + """ The proposed operation on this parent image can not be performed because temporary origins were specified and this image has children. 
Please either retry the operation again without specifying any temporary origins, or if packages from additional origins are required, please configure those origins -persistently.""") - res = [s] - - if self.missing_dependency: - res += [_("Package {pkg} is missing a dependency: " - "{dep}").format( - pkg=self.missing_dependency[0], - dep=self.missing_dependency[1])] - if self.nofiles: - res += [_("The following files are not packaged in this image:")] - res += ["\t{0}".format(f) for f in self.nofiles] - - if self.solver_errors: - res += ["\n"] - res += [_("Solver dependency errors:")] - res.extend(self.solver_errors) - - if self.already_installed: - res += [_("The following packages are already " - "installed in this image; use uninstall to " - "avoid these:")] - res += [ "\t{0}".format(s) for s in self.already_installed] - - if self.would_install: - res += [_("The following packages are a target " - "of group dependencies; use install to unavoid " - "these:")] - res += [ "\t{0}".format(s) for s in self.would_install] - - if self.not_avoided: - res += [_("The following packages are not on the " - "avoid list, so they\ncannot be removed from it.")] - res += [ "\t{0}".format(s) for s in sorted(self.not_avoided)] - - def __format_li_pubs(pubs, res): - i = 0 - for pub, sticky in pubs: - s = " {0} {1:d}: {2}".format(_("PUBLISHER"), - i, pub) - mod = [] - if not sticky: - mod.append(_("non-sticky")) - if mod: - s += " ({0})".format(",".join(mod)) - res.append(s) - i += 1 - - if self.linked_pub_error: - res = [] - (pubs, parent_pubs) = self.linked_pub_error - - res.append(_(""" +persistently.""" + ) + res = [s] + + if self.missing_dependency: + res += [ + _("Package {pkg} is missing a dependency: " "{dep}").format( + pkg=self.missing_dependency[0], + dep=self.missing_dependency[1], + ) + ] + if self.nofiles: + res += [_("The following files are not packaged in this image:")] + res += ["\t{0}".format(f) for f in self.nofiles] + + if self.solver_errors: + res += ["\n"] + res += [_("Solver dependency errors:")] + res.extend(self.solver_errors) + + if self.already_installed: + res += [ + _( + "The following packages are already " + "installed in this image; use uninstall to " + "avoid these:" + ) + ] + res += ["\t{0}".format(s) for s in self.already_installed] + + if self.would_install: + res += [ + _( + "The following packages are a target " + "of group dependencies; use install to unavoid " + "these:" + ) + ] + res += ["\t{0}".format(s) for s in self.would_install] + + if self.not_avoided: + res += [ + _( + "The following packages are not on the " + "avoid list, so they\ncannot be removed from it." + ) + ] + res += ["\t{0}".format(s) for s in sorted(self.not_avoided)] + + def __format_li_pubs(pubs, res): + i = 0 + for pub, sticky in pubs: + s = " {0} {1:d}: {2}".format(_("PUBLISHER"), i, pub) + mod = [] + if not sticky: + mod.append(_("non-sticky")) + if mod: + s += " ({0})".format(",".join(mod)) + res.append(s) + i += 1 + + if self.linked_pub_error: + res = [] + (pubs, parent_pubs) = self.linked_pub_error + + res.append( + _( + """ Invalid child image publisher configuration. Child image publisher configuration must be a superset of the parent image publisher configuration. Please update the child publisher configuration to match the parent. If the child image is a zone this can be done automatically by detaching and attaching the zone. 
-The parent image has the following enabled publishers:""")) - __format_li_pubs(parent_pubs, res) - res.append(_(""" -The child image has the following enabled publishers:""")) - __format_li_pubs(pubs, res) - - if self.no_repo_pubs: - res += [_("The following publishers do not have any " - "configured package repositories and cannot be " - "used in package dehydration or rehydration " - "operations:\n")] - res += ["\t{0}".format(s) for s in sorted( - self.no_repo_pubs)] +The parent image has the following enabled publishers:""" + ) + ) + __format_li_pubs(parent_pubs, res) + res.append( + _( + """ +The child image has the following enabled publishers:""" + ) + ) + __format_li_pubs(pubs, res) + + if self.no_repo_pubs: + res += [ + _( + "The following publishers do not have any " + "configured package repositories and cannot be " + "used in package dehydration or rehydration " + "operations:\n" + ) + ] + res += ["\t{0}".format(s) for s in sorted(self.no_repo_pubs)] - return "\n".join(res) + return "\n".join(res) class ConflictingActionError(ApiException): - """Used to indicate that the imageplan would result in one or more sets - of conflicting actions, meaning that more than one action would exist on - the system with the same key attribute value in the same namespace. - There are three categories, each with its own subclass: + """Used to indicate that the imageplan would result in one or more sets + of conflicting actions, meaning that more than one action would exist on + the system with the same key attribute value in the same namespace. + There are three categories, each with its own subclass: + + - multiple files delivered to the same path or drivers, users, groups, + etc, delivered with the same key attribute; - - multiple files delivered to the same path or drivers, users, groups, - etc, delivered with the same key attribute; + - multiple objects delivered to the same path which aren't the same + type; - - multiple objects delivered to the same path which aren't the same - type; + - multiple directories, links, or hardlinks delivered to the same path + but with conflicting attributes. + """ - - multiple directories, links, or hardlinks delivered to the same path - but with conflicting attributes. - """ + def __init__(self, data): + self._data = data - def __init__(self, data): - self._data = data class ConflictingActionErrors(ApiException): - """A container for multiple ConflictingActionError exception objects - that can be raised as a single exception.""" + """A container for multiple ConflictingActionError exception objects + that can be raised as a single exception.""" - def __init__(self, errors): - self.__errors = errors + def __init__(self, errors): + self.__errors = errors + + def __str__(self): + return "\n\n".join((str(err) for err in self.__errors)) - def __str__(self): - return "\n\n".join((str(err) for err in self.__errors)) class DuplicateActionError(ConflictingActionError): - """Multiple actions of the same type have been delivered with the same - key attribute (when not allowed).""" - - def __str__(self): - pfmris = set((a[1] for a in self._data)) - kv = self._data[0][0].attrs[self._data[0][0].key_attr] - action = self._data[0][0].name - if len(pfmris) > 1: - s = _("The following packages all deliver {action} " - "actions to {kv}:\n").format(**locals()) - for a, p in self._data: - s += "\n {0}".format(p) - s += _("\n\nThese packages cannot be installed " - "together. 
Any non-conflicting subset\nof " - "the above packages can be installed.") - else: - pfmri = pfmris.pop() - s = _("The package {pfmri} delivers multiple copies " - "of {action} {kv}").format(**locals()) - s += _("\nThis package must be corrected before it " - "can be installed.") + """Multiple actions of the same type have been delivered with the same + key attribute (when not allowed).""" + + def __str__(self): + pfmris = set((a[1] for a in self._data)) + kv = self._data[0][0].attrs[self._data[0][0].key_attr] + action = self._data[0][0].name + if len(pfmris) > 1: + s = _( + "The following packages all deliver {action} " + "actions to {kv}:\n" + ).format(**locals()) + for a, p in self._data: + s += "\n {0}".format(p) + s += _( + "\n\nThese packages cannot be installed " + "together. Any non-conflicting subset\nof " + "the above packages can be installed." + ) + else: + pfmri = pfmris.pop() + s = _( + "The package {pfmri} delivers multiple copies " + "of {action} {kv}" + ).format(**locals()) + s += _( + "\nThis package must be corrected before it " + "can be installed." + ) + + return s - return s class InconsistentActionTypeError(ConflictingActionError): - """Multiple actions of different types have been delivered with the same - 'path' attribute. While this exception could represent other action - groups which share a single namespace, none such exist.""" - - def __str__(self): - ad = {} - pfmris = set() - kv = self._data[0][0].attrs[self._data[0][0].key_attr] - for a, p in self._data: - ad.setdefault(a.name, []).append(p) - pfmris.add(p) - - if len(pfmris) > 1: - s = _("The following packages deliver conflicting " - "action types to {0}:\n").format(kv) - for name, pl in six.iteritems(ad): - s += "\n {0}:".format(name) - s += "".join("\n {0}".format(p) for p in pl) - s += _("\n\nThese packages cannot be installed " - "together. Any non-conflicting subset\nof " - "the above packages can be installed.") - else: - pfmri = pfmris.pop() - types = list_to_lang(list(ad.keys())) - s = _("The package {pfmri} delivers conflicting " - "action types ({types}) to {kv}").format(**locals()) - s += _("\nThis package must be corrected before it " - "can be installed.") - return s + """Multiple actions of different types have been delivered with the same + 'path' attribute. While this exception could represent other action + groups which share a single namespace, none such exist.""" + + def __str__(self): + ad = {} + pfmris = set() + kv = self._data[0][0].attrs[self._data[0][0].key_attr] + for a, p in self._data: + ad.setdefault(a.name, []).append(p) + pfmris.add(p) + + if len(pfmris) > 1: + s = _( + "The following packages deliver conflicting " + "action types to {0}:\n" + ).format(kv) + for name, pl in six.iteritems(ad): + s += "\n {0}:".format(name) + s += "".join("\n {0}".format(p) for p in pl) + s += _( + "\n\nThese packages cannot be installed " + "together. Any non-conflicting subset\nof " + "the above packages can be installed." + ) + else: + pfmri = pfmris.pop() + types = list_to_lang(list(ad.keys())) + s = _( + "The package {pfmri} delivers conflicting " + "action types ({types}) to {kv}" + ).format(**locals()) + s += _( + "\nThis package must be corrected before it " + "can be installed." 
+ ) + return s + class InconsistentActionAttributeError(ConflictingActionError): - """Multiple actions of the same type representing the same object have - have been delivered, but with conflicting attributes, such as two - directories at /usr with groups 'root' and 'sys', or two 'root' users - with uids '0' and '7'.""" - - def __str__(self): - actions = self._data - keyattr = actions[0][0].attrs[actions[0][0].key_attr] - actname = actions[0][0].name - - # Trim the action's attributes to only those required to be - # unique. - def ou(action): - ua = dict( - (k, v) - for k, v in six.iteritems(action.attrs) - if ((k in action.unique_attrs and - not (k == "preserve" and "overlay" in action.attrs)) or - ((action.name == "link" or action.name == "hardlink") and - k.startswith("mediator"))) - ) - action.attrs = ua - return action - - d = {} - for a in actions: - if a[0].attrs.get("implicit", "false") == "false": - d.setdefault(str(ou(a[0])), set()).add(a[1]) - l = sorted([ - (len(pkglist), action, pkglist) - for action, pkglist in six.iteritems(d) - ]) - - s = _("The requested change to the system attempts to install " - "multiple actions\nfor {a} '{k}' with conflicting " - "attributes:\n\n").format(a=actname, k=keyattr) - allpkgs = set() - for num, action, pkglist in l: - allpkgs.update(pkglist) - if num <= 5: - if num == 1: - t = _(" {n:d} package delivers '{a}':\n") - else: - t = _(" {n:d} packages deliver '{a}':\n") - s += t.format(n=num, a=action) - for pkg in sorted(pkglist): - s += _(" {0}\n").format(pkg) - else: - t = _(" {n:d} packages deliver '{a}', including:\n") - s += t.format(n=num, a=action) - for pkg in sorted(pkglist)[:5]: - s += _(" {0}\n").format(pkg) - - if len(allpkgs) == 1: - s += _("\nThis package must be corrected before it " - "can be installed.") + """Multiple actions of the same type representing the same object have + have been delivered, but with conflicting attributes, such as two + directories at /usr with groups 'root' and 'sys', or two 'root' users + with uids '0' and '7'.""" + + def __str__(self): + actions = self._data + keyattr = actions[0][0].attrs[actions[0][0].key_attr] + actname = actions[0][0].name + + # Trim the action's attributes to only those required to be + # unique. + def ou(action): + ua = dict( + (k, v) + for k, v in six.iteritems(action.attrs) + if ( + ( + k in action.unique_attrs + and not (k == "preserve" and "overlay" in action.attrs) + ) + or ( + (action.name == "link" or action.name == "hardlink") + and k.startswith("mediator") + ) + ) + ) + action.attrs = ua + return action + + d = {} + for a in actions: + if a[0].attrs.get("implicit", "false") == "false": + d.setdefault(str(ou(a[0])), set()).add(a[1]) + l = sorted( + [ + (len(pkglist), action, pkglist) + for action, pkglist in six.iteritems(d) + ] + ) + + s = _( + "The requested change to the system attempts to install " + "multiple actions\nfor {a} '{k}' with conflicting " + "attributes:\n\n" + ).format(a=actname, k=keyattr) + allpkgs = set() + for num, action, pkglist in l: + allpkgs.update(pkglist) + if num <= 5: + if num == 1: + t = _(" {n:d} package delivers '{a}':\n") else: - s += _("\n\nThese packages cannot be installed " - "together. 
Any non-conflicting subset\nof " - "the above packages can be installed.") - - return s + t = _(" {n:d} packages deliver '{a}':\n") + s += t.format(n=num, a=action) + for pkg in sorted(pkglist): + s += _(" {0}\n").format(pkg) + else: + t = _(" {n:d} packages deliver '{a}', including:\n") + s += t.format(n=num, a=action) + for pkg in sorted(pkglist)[:5]: + s += _(" {0}\n").format(pkg) + + if len(allpkgs) == 1: + s += _( + "\nThis package must be corrected before it " + "can be installed." + ) + else: + s += _( + "\n\nThese packages cannot be installed " + "together. Any non-conflicting subset\nof " + "the above packages can be installed." + ) + + return s class ImageBoundaryError(ApiException): - """Used to indicate that a file is delivered to image dir""" - - GENERIC = "generic" # generic image boundary violation - OUTSIDE_BE = "outside_be" # deliver items outside boot environment - RESERVED = "reserved" # deliver items to reserved dirs - - def __init__(self, fmri, actions=None): - """fmri is the package fmri - actions should be a dictionary of which key is the - error type and value is a list of actions""" - - ApiException.__init__(self) - self.fmri = fmri - generic = _("The following items are outside the boundaries " - "of the target image:\n\n") - outside_be = _("The following items are delivered outside " - "the target boot environment:\n\n") - reserved = _("The following items are delivered to " - "reserved directories:\n\n") - - self.message = { - self.GENERIC: generic, - self.OUTSIDE_BE: outside_be, - self.RESERVED: reserved - } - - if actions: - self.actions = actions + """Used to indicate that a file is delivered to image dir""" + + GENERIC = "generic" # generic image boundary violation + OUTSIDE_BE = "outside_be" # deliver items outside boot environment + RESERVED = "reserved" # deliver items to reserved dirs + + def __init__(self, fmri, actions=None): + """fmri is the package fmri + actions should be a dictionary of which key is the + error type and value is a list of actions""" + + ApiException.__init__(self) + self.fmri = fmri + generic = _( + "The following items are outside the boundaries " + "of the target image:\n\n" + ) + outside_be = _( + "The following items are delivered outside " + "the target boot environment:\n\n" + ) + reserved = _( + "The following items are delivered to " "reserved directories:\n\n" + ) + + self.message = { + self.GENERIC: generic, + self.OUTSIDE_BE: outside_be, + self.RESERVED: reserved, + } + + if actions: + self.actions = actions + else: + self.actions = {} + + def append_error(self, action, err_type=GENERIC): + """This function is used to append errors in the error + dictionary""" + + if action: + self.actions.setdefault(err_type, []).append(action) + + def isEmpty(self): + """Return whether error dictionary is empty""" + + return len(self.actions) == 0 + + def __str__(self): + error_list = [self.GENERIC, self.OUTSIDE_BE, self.RESERVED] + s = "" + for err_type in error_list: + if not err_type in self.actions: + continue + if self.actions[err_type]: + if err_type == self.GENERIC: + s += ( + "The package {0} delivers items" + " outside the boundaries of" + " the target image and can not be" + " installed.\n\n" + ).format(self.fmri) + elif err_type == self.OUTSIDE_BE: + s += ( + "The package {0} delivers items" + " outside the target boot" + " environment and can not be" + " installed.\n\n" + ).format(self.fmri) else: - self.actions = {} - - def append_error(self, action, err_type=GENERIC): - """This function is used to append errors in the 
error - dictionary""" - - if action: - self.actions.setdefault(err_type, []).append(action) - - def isEmpty(self): - """Return whether error dictionary is empty""" - - return len(self.actions) == 0 - - def __str__(self): - error_list = [self.GENERIC, self.OUTSIDE_BE, self.RESERVED] - s = "" - for err_type in error_list: - if not err_type in self.actions: - continue - if self.actions[err_type]: - if err_type == self.GENERIC: - s += ("The package {0} delivers items" - " outside the boundaries of" - " the target image and can not be" - " installed.\n\n").format(self.fmri) - elif err_type == self.OUTSIDE_BE: - s += ("The package {0} delivers items" - " outside the target boot" - " environment and can not be" - " installed.\n\n").format(self.fmri) - else: - s += ("The package {0} delivers items" - " to reserved directories and can" - " not be installed.\n\n").format(self.fmri) - s += self.message[err_type] - for action in self.actions[err_type]: - s += (" {0} {1}\n").format( - action.name, action.attrs["path"]) - return s + s += ( + "The package {0} delivers items" + " to reserved directories and can" + " not be installed.\n\n" + ).format(self.fmri) + s += self.message[err_type] + for action in self.actions[err_type]: + s += (" {0} {1}\n").format( + action.name, action.attrs["path"] + ) + return s class ImageBoundaryErrors(ApiException): - """A container for multiple ImageBoundaryError exception objects - that can be raised as a single exception.""" - - def __init__(self, errors): - ApiException.__init__(self) - self.__errors = errors - - generic = _("The following packages deliver items outside " - "the boundaries of the target image and can not be " - "installed:\n\n") - outside_be = _("The following packages deliver items outside " - "the target boot environment and can not be " - "installed:\n\n") - reserved = _("The following packages deliver items to reserved " - "directories and can not be installed:\n\n") - - self.message = { - ImageBoundaryError.GENERIC: generic, - ImageBoundaryError.OUTSIDE_BE: outside_be, - ImageBoundaryError.RESERVED: reserved - } - - def __str__(self): - if len(self.__errors) <= 1: - return "\n".join([str(err) for err in self.__errors]) - - s = "" - for err_type in self.message: - cur_errs = [] - for err in self.__errors: - # If err does not contain this error type - # we just ignore this. 
- if not err_type in err.actions or \ - not err.actions[err_type]: - continue - cur_errs.append(err) - - if not cur_errs: - continue - - if len(cur_errs) == 1: - s += str(cur_errs[0]) + "\n" - continue - - s += self.message[err_type] - for err in cur_errs: - s += (" {0}\n").format(err.fmri) - for action in err.actions[err_type]: - s += (" {0} {1}\n").format( - action.name, action.attrs["path"]) - s += "\n" - return s + """A container for multiple ImageBoundaryError exception objects + that can be raised as a single exception.""" + + def __init__(self, errors): + ApiException.__init__(self) + self.__errors = errors + + generic = _( + "The following packages deliver items outside " + "the boundaries of the target image and can not be " + "installed:\n\n" + ) + outside_be = _( + "The following packages deliver items outside " + "the target boot environment and can not be " + "installed:\n\n" + ) + reserved = _( + "The following packages deliver items to reserved " + "directories and can not be installed:\n\n" + ) + + self.message = { + ImageBoundaryError.GENERIC: generic, + ImageBoundaryError.OUTSIDE_BE: outside_be, + ImageBoundaryError.RESERVED: reserved, + } + + def __str__(self): + if len(self.__errors) <= 1: + return "\n".join([str(err) for err in self.__errors]) + + s = "" + for err_type in self.message: + cur_errs = [] + for err in self.__errors: + # If err does not contain this error type + # we just ignore this. + if not err_type in err.actions or not err.actions[err_type]: + continue + cur_errs.append(err) + + if not cur_errs: + continue + + if len(cur_errs) == 1: + s += str(cur_errs[0]) + "\n" + continue + + s += self.message[err_type] + for err in cur_errs: + s += (" {0}\n").format(err.fmri) + for action in err.actions[err_type]: + s += (" {0} {1}\n").format( + action.name, action.attrs["path"] + ) + s += "\n" + return s def list_to_lang(l): - """Takes a list of items and puts them into a string, with commas in - between items, and an "and" between the last two items. Special cases - for lists of two or fewer items, and uses the Oxford comma.""" - - if not l: - return "" - if len(l) == 1: - return l[0] - if len(l) == 2: - # Used for a two-element list - return _("{penultimate} and {ultimate}").format( - penultimate=l[0], - ultimate=l[1] - ) - # In order to properly i18n this construct, we create two templates: - # one for each element save the last, and one that tacks on the last - # element. - # 'elementtemplate' is for each element through the penultimate - elementtemplate = _("{0}, ") - # 'listtemplate' concatenates the concatenation of non-ultimate elements - # and the ultimate element. - listtemplate = _("{list}and {tail}") - return listtemplate.format( - list="".join(elementtemplate.format(i) for i in l[:-1]), - tail=l[-1] - ) + """Takes a list of items and puts them into a string, with commas in + between items, and an "and" between the last two items. Special cases + for lists of two or fewer items, and uses the Oxford comma.""" + + if not l: + return "" + if len(l) == 1: + return l[0] + if len(l) == 2: + # Used for a two-element list + return _("{penultimate} and {ultimate}").format( + penultimate=l[0], ultimate=l[1] + ) + # In order to properly i18n this construct, we create two templates: + # one for each element save the last, and one that tacks on the last + # element. + # 'elementtemplate' is for each element through the penultimate + elementtemplate = _("{0}, ") + # 'listtemplate' concatenates the concatenation of non-ultimate elements + # and the ultimate element. 
+ listtemplate = _("{list}and {tail}") + return listtemplate.format( + list="".join(elementtemplate.format(i) for i in l[:-1]), tail=l[-1] + ) + class ActionExecutionError(ApiException): - """Used to indicate that action execution (such as install, remove, - etc.) failed even though the action is valid. - - In particular, this exception indicates that something went wrong in the - application (or unapplication) of the action to the system, and is most - likely not an error in the pkg(7) code.""" - - def __init__(self, action, details=None, error=None, fmri=None, - use_errno=None): - """'action' is the object for the action that failed during the - requested operation. - - 'details' is an optional message explaining what operation - failed, why it failed, and why it cannot continue. It should - also include a suggestion as to how to resolve the situation - if possible. - - 'error' is an optional exception object that may have been - raised when the operation failed. - - 'fmri' is an optional package FMRI indicating what package - was being operated on at the time the error occurred. - - 'use_errno' is an optional boolean value indicating whether - the strerror() text of the exception should be used. If - 'details' is provided, the default value is False, otherwise - True.""" - - assert (details or error) - self.action = action - self.details = details - self.error = error - self.fmri = fmri - if use_errno == None: - # If details were provided, don't use errno unless - # explicitly requested. - use_errno = not details - self.use_errno = use_errno - - @property - def errno(self): - if self.error and hasattr(self.error, "errno"): - return self.error.errno - return None - - def __str__(self): - errno = "" - if self.use_errno and self.errno is not None: - errno = "[errno {0:d}: {1}]".format(self.errno, - os.strerror(self.errno)) - - details = self.details or "" - - # Fall back on the wrapped exception if we don't have anything - # useful. - if not errno and not details: - return str(self.error) - - if errno and details: - details = "{0}: {1}".format(errno, details) - - if details and not self.fmri: - details = _("Requested operation failed for action " - "{action}:\n{details}").format( - action=self.action, - details=details) - elif details: - details = _("Requested operation failed for package " - "{fmri}:\n{details}").format(fmri=self.fmri, - details=details) - - # If we only have one of the two, no need for the colon. - return "{0}{1}".format(errno, details) + """Used to indicate that action execution (such as install, remove, + etc.) failed even though the action is valid. + + In particular, this exception indicates that something went wrong in the + application (or unapplication) of the action to the system, and is most + likely not an error in the pkg(7) code.""" + + def __init__( + self, action, details=None, error=None, fmri=None, use_errno=None + ): + """'action' is the object for the action that failed during the + requested operation. + + 'details' is an optional message explaining what operation + failed, why it failed, and why it cannot continue. It should + also include a suggestion as to how to resolve the situation + if possible. + + 'error' is an optional exception object that may have been + raised when the operation failed. + + 'fmri' is an optional package FMRI indicating what package + was being operated on at the time the error occurred. + + 'use_errno' is an optional boolean value indicating whether + the strerror() text of the exception should be used. 
If + 'details' is provided, the default value is False, otherwise + True.""" + + assert details or error + self.action = action + self.details = details + self.error = error + self.fmri = fmri + if use_errno == None: + # If details were provided, don't use errno unless + # explicitly requested. + use_errno = not details + self.use_errno = use_errno + + @property + def errno(self): + if self.error and hasattr(self.error, "errno"): + return self.error.errno + return None + + def __str__(self): + errno = "" + if self.use_errno and self.errno is not None: + errno = "[errno {0:d}: {1}]".format( + self.errno, os.strerror(self.errno) + ) + + details = self.details or "" + + # Fall back on the wrapped exception if we don't have anything + # useful. + if not errno and not details: + return str(self.error) + + if errno and details: + details = "{0}: {1}".format(errno, details) + + if details and not self.fmri: + details = _( + "Requested operation failed for action " "{action}:\n{details}" + ).format(action=self.action, details=details) + elif details: + details = _( + "Requested operation failed for package " "{fmri}:\n{details}" + ).format(fmri=self.fmri, details=details) + + # If we only have one of the two, no need for the colon. + return "{0}{1}".format(errno, details) class CatalogOriginRefreshException(ApiException): - def __init__(self, failed, total, errmessage=None): - ApiException.__init__(self) - self.failed = failed - self.total = total - self.errmessage = errmessage + def __init__(self, failed, total, errmessage=None): + ApiException.__init__(self) + self.failed = failed + self.total = total + self.errmessage = errmessage class CatalogRefreshException(ApiException): - def __init__(self, failed, total, succeeded, errmessage=None): - ApiException.__init__(self) - self.failed = failed - self.total = total - self.succeeded = succeeded - self.errmessage = errmessage + def __init__(self, failed, total, succeeded, errmessage=None): + ApiException.__init__(self) + self.failed = failed + self.total = total + self.succeeded = succeeded + self.errmessage = errmessage class CatalogError(ApiException): - """Base exception class for all catalog exceptions.""" + """Base exception class for all catalog exceptions.""" - def __init__(self, *args, **kwargs): - ApiException.__init__(self) - if args: - self.data = args[0] - else: - self.data = None - self._args = kwargs + def __init__(self, *args, **kwargs): + ApiException.__init__(self) + if args: + self.data = args[0] + else: + self.data = None + self._args = kwargs - def __str__(self): - return str(self.data) + def __str__(self): + return str(self.data) class AnarchicalCatalogFMRI(CatalogError): - """Used to indicate that the specified FMRI is not valid for catalog - operations because it is missing publisher information.""" + """Used to indicate that the specified FMRI is not valid for catalog + operations because it is missing publisher information.""" - def __str__(self): - return _("The FMRI '{0}' does not contain publisher information " - "and cannot be used for catalog operations.").format( - self.data) + def __str__(self): + return _( + "The FMRI '{0}' does not contain publisher information " + "and cannot be used for catalog operations." 
+ ).format(self.data) class BadCatalogMetaRoot(CatalogError): - """Used to indicate an operation on the catalog's meta_root failed - because the meta_root is invalid.""" + """Used to indicate an operation on the catalog's meta_root failed + because the meta_root is invalid.""" - def __str__(self): - return _("Catalog meta_root '{root}' is invalid; unable " - "to complete operation: '{op}'.").format(root=self.data, - op=self._args.get("operation", None)) + def __str__(self): + return _( + "Catalog meta_root '{root}' is invalid; unable " + "to complete operation: '{op}'." + ).format(root=self.data, op=self._args.get("operation", None)) class BadCatalogPermissions(CatalogError): - """Used to indicate the server catalog files do not have the expected - permissions.""" - - def __init__(self, files): - """files should contain a list object with each entry consisting - of a tuple of filename, expected_mode, received_mode.""" - if not files: - files = [] - CatalogError.__init__(self, files) - - def __str__(self): - msg = _("The following catalog files have incorrect " - "permissions:\n") - for f in self.data: - fname, emode, fmode = f - msg += _("\t{fname}: expected mode: {emode}, found " - "mode: {fmode}\n").format(fname=fname, - emode=emode, fmode=fmode) - return msg + """Used to indicate the server catalog files do not have the expected + permissions.""" + + def __init__(self, files): + """files should contain a list object with each entry consisting + of a tuple of filename, expected_mode, received_mode.""" + if not files: + files = [] + CatalogError.__init__(self, files) + + def __str__(self): + msg = _("The following catalog files have incorrect " "permissions:\n") + for f in self.data: + fname, emode, fmode = f + msg += _( + "\t{fname}: expected mode: {emode}, found " "mode: {fmode}\n" + ).format(fname=fname, emode=emode, fmode=fmode) + return msg class BadCatalogSignatures(CatalogError): - """Used to indicate that the Catalog signatures are not valid.""" + """Used to indicate that the Catalog signatures are not valid.""" - def __str__(self): - return _("The signature data for the '{0}' catalog file is not " - "valid.").format(self.data) + def __str__(self): + return _( + "The signature data for the '{0}' catalog file is not " "valid." + ).format(self.data) class BadCatalogUpdateIdentity(CatalogError): - """Used to indicate that the requested catalog updates could not be - applied as the new catalog data is significantly different such that - the old catalog cannot be updated to match it.""" + """Used to indicate that the requested catalog updates could not be + applied as the new catalog data is significantly different such that + the old catalog cannot be updated to match it.""" - def __str__(self): - return _("Unable to determine the updates needed for " - "the current catalog using the provided catalog " - "update data in '{0}'.").format(self.data) + def __str__(self): + return _( + "Unable to determine the updates needed for " + "the current catalog using the provided catalog " + "update data in '{0}'." 
+ ).format(self.data) class DuplicateCatalogEntry(CatalogError): - """Used to indicate that the specified catalog operation could not be - performed since it would result in a duplicate catalog entry.""" + """Used to indicate that the specified catalog operation could not be + performed since it would result in a duplicate catalog entry.""" - def __str__(self): - return _("Unable to perform '{op}' operation for catalog " - "{name}; completion would result in a duplicate entry " - "for package '{fmri}'.").format(op=self._args.get( - "operation", None), name=self._args.get("catalog_name", - None), fmri=self.data) + def __str__(self): + return _( + "Unable to perform '{op}' operation for catalog " + "{name}; completion would result in a duplicate entry " + "for package '{fmri}'." + ).format( + op=self._args.get("operation", None), + name=self._args.get("catalog_name", None), + fmri=self.data, + ) class CatalogUpdateRequirements(CatalogError): - """Used to indicate that an update request for the catalog could not - be performed because update requirements were not satisfied.""" + """Used to indicate that an update request for the catalog could not + be performed because update requirements were not satisfied.""" - def __str__(self): - return _("Catalog updates can only be applied to an on-disk " - "catalog.") + def __str__(self): + return _( + "Catalog updates can only be applied to an on-disk " "catalog." + ) class InvalidCatalogFile(CatalogError): - """Used to indicate a Catalog file could not be loaded.""" + """Used to indicate a Catalog file could not be loaded.""" - def __str__(self): - return _("Catalog file '{0}' is invalid.\nUse 'pkgrepo rebuild' " - "to create a new package catalog.").format(self.data) + def __str__(self): + return _( + "Catalog file '{0}' is invalid.\nUse 'pkgrepo rebuild' " + "to create a new package catalog." + ).format(self.data) class MismatchedCatalog(CatalogError): - """Used to indicate that a Catalog's attributes and parts do not - match. This is likely the result of an attributes file being - retrieved which doesn't match the parts that were retrieved such - as in a misconfigured or stale cache case.""" + """Used to indicate that a Catalog's attributes and parts do not + match. This is likely the result of an attributes file being + retrieved which doesn't match the parts that were retrieved such + as in a misconfigured or stale cache case.""" - def __str__(self): - return _("The content of the catalog for publisher '{0}' " - "doesn't match the catalog's attributes. This is " - "likely the result of a mix of older and newer " - "catalog files being provided for the publisher.").format( - self.data) + def __str__(self): + return _( + "The content of the catalog for publisher '{0}' " + "doesn't match the catalog's attributes. This is " + "likely the result of a mix of older and newer " + "catalog files being provided for the publisher." + ).format(self.data) class ObsoleteCatalogUpdate(CatalogError): - """Used to indicate that the specified catalog updates are for an older - version of the catalog and cannot be applied.""" + """Used to indicate that the specified catalog updates are for an older + version of the catalog and cannot be applied.""" - def __str__(self): - return _("Unable to determine the updates needed for the " - "catalog using the provided catalog update data in '{0}'. 
" - "The specified catalog updates are for an older version " - "of the catalog and cannot be used.").format(self.data) + def __str__(self): + return _( + "Unable to determine the updates needed for the " + "catalog using the provided catalog update data in '{0}'. " + "The specified catalog updates are for an older version " + "of the catalog and cannot be used." + ).format(self.data) class UnknownCatalogEntry(CatalogError): - """Used to indicate that an entry for the specified package FMRI or - pattern could not be found in the catalog.""" + """Used to indicate that an entry for the specified package FMRI or + pattern could not be found in the catalog.""" - def __str__(self): - return _("'{0}' could not be found in the catalog.").format( - self.data) + def __str__(self): + return _("'{0}' could not be found in the catalog.").format(self.data) class UnknownUpdateType(CatalogError): - """Used to indicate that the specified CatalogUpdate operation is - unknown.""" + """Used to indicate that the specified CatalogUpdate operation is + unknown.""" - def __str__(self): - return _("Unknown catalog update type '{0}'").format(self.data) + def __str__(self): + return _("Unknown catalog update type '{0}'").format(self.data) class UnrecognizedCatalogPart(CatalogError): - """Raised when the catalog finds a CatalogPart that is unrecognized - or invalid.""" + """Raised when the catalog finds a CatalogPart that is unrecognized + or invalid.""" - def __str__(self): - return _("Unrecognized, unknown, or invalid CatalogPart '{0}'").format( - self.data) + def __str__(self): + return _("Unrecognized, unknown, or invalid CatalogPart '{0}'").format( + self.data + ) class InventoryException(ApiException): - """Used to indicate that some of the specified patterns to a catalog - matching function did not match any catalog entries, or were invalid - patterns.""" - - def __init__(self, illegal=EmptyI, matcher=EmptyI, notfound=EmptyI, - publisher=EmptyI, version=EmptyI): - ApiException.__init__(self) - self.illegal = illegal - self.matcher = matcher - self.notfound = set(notfound) - self.publisher = publisher - self.version = version - - self.notfound.update(matcher) - self.notfound.update(publisher) - self.notfound.update(version) - self.notfound = sorted(list(self.notfound)) - - assert self.illegal or self.notfound - - def __str__(self): - outstr = "" - for x in self.illegal: - # Illegal FMRIs have their own __str__ method - outstr += "{0}\n".format(x) - - if self.matcher or self.publisher or self.version: - outstr += _("No matching package could be found for " - "the following FMRIs in any of the catalogs for " - "the current publishers:\n") - - for x in self.matcher: - outstr += \ - _("{0} (pattern did not match)\n").format(x) - for x in self.publisher: - outstr += _("{0} (publisher did not " - "match)\n").format(x) - for x in self.version: - outstr += \ - _("{0} (version did not match)\n").format(x) - return outstr + """Used to indicate that some of the specified patterns to a catalog + matching function did not match any catalog entries, or were invalid + patterns.""" + + def __init__( + self, + illegal=EmptyI, + matcher=EmptyI, + notfound=EmptyI, + publisher=EmptyI, + version=EmptyI, + ): + ApiException.__init__(self) + self.illegal = illegal + self.matcher = matcher + self.notfound = set(notfound) + self.publisher = publisher + self.version = version + + self.notfound.update(matcher) + self.notfound.update(publisher) + self.notfound.update(version) + self.notfound = sorted(list(self.notfound)) + + assert 
self.illegal or self.notfound + + def __str__(self): + outstr = "" + for x in self.illegal: + # Illegal FMRIs have their own __str__ method + outstr += "{0}\n".format(x) + + if self.matcher or self.publisher or self.version: + outstr += _( + "No matching package could be found for " + "the following FMRIs in any of the catalogs for " + "the current publishers:\n" + ) + + for x in self.matcher: + outstr += _("{0} (pattern did not match)\n").format(x) + for x in self.publisher: + outstr += _("{0} (publisher did not " "match)\n").format(x) + for x in self.version: + outstr += _("{0} (version did not match)\n").format(x) + return outstr # SearchExceptions + class SearchException(ApiException): - """Based class used for all search-related api exceptions.""" - pass + """Based class used for all search-related api exceptions.""" + + pass class MalformedSearchRequest(SearchException): - """Raised when the server cannot understand the format of the - search request.""" + """Raised when the server cannot understand the format of the + search request.""" - def __init__(self, url): - SearchException.__init__(self) - self.url = url + def __init__(self, url): + SearchException.__init__(self) + self.url = url - def __str__(self): - return str(self.url) + def __str__(self): + return str(self.url) class NegativeSearchResult(SearchException): - """Returned when the search cannot find any matches.""" + """Returned when the search cannot find any matches.""" - def __init__(self, url): - SearchException.__init__(self) - self.url = url + def __init__(self, url): + SearchException.__init__(self) + self.url = url - def __str__(self): - return _("The search at url {0} returned no results.").format( - self.url) + def __str__(self): + return _("The search at url {0} returned no results.").format(self.url) class ProblematicSearchServers(SearchException): - """This class wraps exceptions which could appear while trying to - do a search request.""" - - def __init__(self, failed=EmptyI, invalid=EmptyI, unsupported=EmptyI): - SearchException.__init__(self) - self.failed_servers = failed - self.invalid_servers = invalid - self.unsupported_servers = unsupported - - def __str__(self): - s = _("Some repositories failed to respond appropriately:\n") - for pub, err in self.failed_servers: - s += _("{o}:\n{msg}\n").format( - o=pub, msg=err) - for pub in self.invalid_servers: - s += _("{0} did not return a valid " - "response.\n".format(pub)) - if len(self.unsupported_servers) > 0: - s += _("Some repositories don't support requested " - "search operation:\n") - for pub, err in self.unsupported_servers: - s += _("{o}:\n{msg}\n").format( - o=pub, msg=err) - - return s + """This class wraps exceptions which could appear while trying to + do a search request.""" + + def __init__(self, failed=EmptyI, invalid=EmptyI, unsupported=EmptyI): + SearchException.__init__(self) + self.failed_servers = failed + self.invalid_servers = invalid + self.unsupported_servers = unsupported + + def __str__(self): + s = _("Some repositories failed to respond appropriately:\n") + for pub, err in self.failed_servers: + s += _("{o}:\n{msg}\n").format(o=pub, msg=err) + for pub in self.invalid_servers: + s += _("{0} did not return a valid " "response.\n".format(pub)) + if len(self.unsupported_servers) > 0: + s += _( + "Some repositories don't support requested " + "search operation:\n" + ) + for pub, err in self.unsupported_servers: + s += _("{o}:\n{msg}\n").format(o=pub, msg=err) + + return s class SlowSearchUsed(SearchException): - """This exception is 
thrown when a local search is performed without - an index. It's raised after all results have been yielded.""" + """This exception is thrown when a local search is performed without + an index. It's raised after all results have been yielded.""" - def __str__(self): - return _("Search performance is degraded.\n" - "Run 'pkg rebuild-index' to improve search speed.") + def __str__(self): + return _( + "Search performance is degraded.\n" + "Run 'pkg rebuild-index' to improve search speed." + ) @total_ordering class UnsupportedSearchError(SearchException): - """Returned when a search protocol is not supported by the - remote server.""" - - def __init__(self, url=None, proto=None): - SearchException.__init__(self) - self.url = url - self.proto = proto - - def __str__(self): - s = _("Search repository does not support the requested " - "protocol:") - if self.url: - s += "\nRepository URL: {0}".format(self.url) - if self.proto: - s += "\nRequested operation: {0}".format(self.proto) - return s - - def __eq__(self, other): - if not isinstance(other, UnsupportedSearchError): - return False - return self.url == other.url and \ - self.proto == other.proto - - def __le__(self, other): - if not isinstance(other, UnsupportedSearchError): - return True - if self.url < other.url: - return True - if self.url != other.url: - return False - return self.proto < other.proto - - def __hash__(self): - return hash((self.url, self.proto)) + """Returned when a search protocol is not supported by the + remote server.""" + + def __init__(self, url=None, proto=None): + SearchException.__init__(self) + self.url = url + self.proto = proto + + def __str__(self): + s = _("Search repository does not support the requested " "protocol:") + if self.url: + s += "\nRepository URL: {0}".format(self.url) + if self.proto: + s += "\nRequested operation: {0}".format(self.proto) + return s + + def __eq__(self, other): + if not isinstance(other, UnsupportedSearchError): + return False + return self.url == other.url and self.proto == other.proto + + def __le__(self, other): + if not isinstance(other, UnsupportedSearchError): + return True + if self.url < other.url: + return True + if self.url != other.url: + return False + return self.proto < other.proto + + def __hash__(self): + return hash((self.url, self.proto)) # IndexingExceptions. + class IndexingException(SearchException): - """ The base class for all exceptions that can occur while indexing. 
""" + """The base class for all exceptions that can occur while indexing.""" - def __init__(self, private_exception): - SearchException.__init__(self) - self.cause = private_exception.cause + def __init__(self, private_exception): + SearchException.__init__(self) + self.cause = private_exception.cause class CorruptedIndexException(IndexingException): - """This is used when the index is not in a correct state.""" - def __str__(self): - return _("The search index appears corrupted.") + """This is used when the index is not in a correct state.""" + + def __str__(self): + return _("The search index appears corrupted.") class InconsistentIndexException(IndexingException): - """This is used when the existing index is found to have inconsistent - versions.""" - def __init__(self, e): - IndexingException.__init__(self, e) - self.exception = e + """This is used when the existing index is found to have inconsistent + versions.""" + + def __init__(self, e): + IndexingException.__init__(self, e) + self.exception = e - def __str__(self): - return str(self.exception) + def __str__(self): + return str(self.exception) class IndexLockedException(IndexingException): - """This is used when an attempt to modify an index locked by another - process or thread is made.""" + """This is used when an attempt to modify an index locked by another + process or thread is made.""" - def __init__(self, e): - IndexingException.__init__(self, e) - self.exception = e + def __init__(self, e): + IndexingException.__init__(self, e) + self.exception = e - def __str__(self): - return str(self.exception) + def __str__(self): + return str(self.exception) class ProblematicPermissionsIndexException(IndexingException): - """ This is used when the indexer is unable to create, move, or remove - files or directories it should be able to. """ - def __str__(self): - return "Could not remove or create " \ - "{0} because of incorrect " \ - "permissions. Please correct this issue then " \ - "rebuild the index.".format(self.cause) + """This is used when the indexer is unable to create, move, or remove + files or directories it should be able to.""" + + def __str__(self): + return ( + "Could not remove or create " + "{0} because of incorrect " + "permissions. 
Please correct this issue then " + "rebuild the index.".format(self.cause) + ) + class WrapIndexingException(ApiException): - """This exception is used to wrap an indexing exception during install, - uninstall, or update so that a more appropriate error message can be - displayed to the user.""" + """This exception is used to wrap an indexing exception during install, + uninstall, or update so that a more appropriate error message can be + displayed to the user.""" - def __init__(self, e, tb, stack): - ApiException.__init__(self) - self.wrapped = e - self.tb = tb - self.stack = stack + def __init__(self, e, tb, stack): + ApiException.__init__(self) + self.wrapped = e + self.tb = tb + self.stack = stack - def __str__(self): - tmp = self.tb.split("\n") - res = tmp[:1] + [s.rstrip("\n") for s in self.stack] + tmp[1:] - return "\n".join(res) + def __str__(self): + tmp = self.tb.split("\n") + res = tmp[:1] + [s.rstrip("\n") for s in self.stack] + tmp[1:] + return "\n".join(res) class WrapSuccessfulIndexingException(WrapIndexingException): - """This exception is used to wrap an indexing exception during install, - uninstall, or update which was recovered from by performing a full - reindex.""" - pass + """This exception is used to wrap an indexing exception during install, + uninstall, or update which was recovered from by performing a full + reindex.""" + + pass # Query Parsing Exceptions class BooleanQueryException(ApiException): - """This exception is used when the children of a boolean operation - have different return types. The command 'pkg search foo AND ' - is the simplest example of this.""" + """This exception is used when the children of a boolean operation + have different return types. The command 'pkg search foo AND ' + is the simplest example of this.""" - def __init__(self, e): - ApiException.__init__(self) - self.e = e + def __init__(self, e): + ApiException.__init__(self) + self.e = e - def __str__(self): - return str(self.e) + def __str__(self): + return str(self.e) class ParseError(ApiException): - def __init__(self, e): - ApiException.__init__(self) - self.e = e + def __init__(self, e): + ApiException.__init__(self) + self.e = e - def __str__(self): - return str(self.e) + def __str__(self): + return str(self.e) class NonLeafPackageException(ApiException): - """Removal of a package which satisfies dependencies has been attempted. - - The first argument to the constructor is the FMRI which we tried to - remove, and is available as the "fmri" member of the exception. The - second argument is the list of dependent packages that prevent the - removal of the package, and is available as the "dependents" member. - """ - - def __init__(self, *args): - ApiException.__init__(self, *args) - - self.fmri = args[0] - self.dependents = args[1] - - def __str__(self): - s = _("Unable to remove '{0}' due to the following packages " - "that depend on it:\n").format(self.fmri.get_short_fmri( - anarchy=True, include_scheme=False)) - skey = operator.attrgetter('pkg_name') - s += "\n".join( - " {0}".format(f.get_short_fmri(anarchy=True, - include_scheme=False)) - for f in sorted(self.dependents, key=skey) - ) - return s + """Removal of a package which satisfies dependencies has been attempted. + The first argument to the constructor is the FMRI which we tried to + remove, and is available as the "fmri" member of the exception. The + second argument is the list of dependent packages that prevent the + removal of the package, and is available as the "dependents" member. 
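The @total_ordering decorator on UnsupportedSearchError above only requires __eq__ plus one ordering method; functools derives the remaining comparisons. A minimal, self-contained sketch of that pattern (the Endpoint class is illustrative, not part of pkg):

    from functools import total_ordering

    @total_ordering
    class Endpoint:
        def __init__(self, url, proto):
            self.url = url
            self.proto = proto

        def __eq__(self, other):
            return (self.url, self.proto) == (other.url, other.proto)

        def __le__(self, other):
            # one ordering method is enough; total_ordering fills in the rest
            return (self.url, self.proto) <= (other.url, other.proto)

        def __hash__(self):
            return hash((self.url, self.proto))

    a = Endpoint("http://pkg.example.org", "search/1")
    b = Endpoint("http://pkg.example.org", "search/2")
    print(a < b, a >= b)   # True False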
+ """ -def _str_autofix(self): + def __init__(self, *args): + ApiException.__init__(self, *args) + + self.fmri = args[0] + self.dependents = args[1] + + def __str__(self): + s = _( + "Unable to remove '{0}' due to the following packages " + "that depend on it:\n" + ).format(self.fmri.get_short_fmri(anarchy=True, include_scheme=False)) + skey = operator.attrgetter("pkg_name") + s += "\n".join( + " {0}".format(f.get_short_fmri(anarchy=True, include_scheme=False)) + for f in sorted(self.dependents, key=skey) + ) + return s - if getattr(self, "_autofix_pkgs", []): - s = _("\nThis is happening because the following " - "packages needed to be repaired as\npart of this " - "operation:\n\n ") - s += "\n ".join(str(f) for f in self._autofix_pkgs) - s += _("\n\nYou will need to reestablish your access to the " - "repository or remove the\npackages in the list above.") - return s - return "" + +def _str_autofix(self): + if getattr(self, "_autofix_pkgs", []): + s = _( + "\nThis is happening because the following " + "packages needed to be repaired as\npart of this " + "operation:\n\n " + ) + s += "\n ".join(str(f) for f in self._autofix_pkgs) + s += _( + "\n\nYou will need to reestablish your access to the " + "repository or remove the\npackages in the list above." + ) + return s + return "" class InvalidDepotResponseException(ApiException): - """Raised when the depot doesn't have versions of operations - that the client needs to operate successfully.""" - def __init__(self, url, data): - ApiException.__init__(self) - self.url = url - self.data = data + """Raised when the depot doesn't have versions of operations + that the client needs to operate successfully.""" - def __str__(self): - s = _("Unable to contact valid package repository") - if self.url: - s += _(": {0}").format(self.url) - if self.data: - s += ("\nEncountered the following error(s):\n{0}").format( - self.data) + def __init__(self, url, data): + ApiException.__init__(self) + self.url = url + self.data = data - s += _str_autofix(self) + def __str__(self): + s = _("Unable to contact valid package repository") + if self.url: + s += _(": {0}").format(self.url) + if self.data: + s += ("\nEncountered the following error(s):\n{0}").format( + self.data + ) - return s + s += _str_autofix(self) + + return s class DataError(ApiException): - """Base exception class used for all data related errors.""" + """Base exception class used for all data related errors.""" - def __init__(self, *args, **kwargs): - ApiException.__init__(self, *args) - if args: - self.data = args[0] - else: - self.data = None - self._args = kwargs + def __init__(self, *args, **kwargs): + ApiException.__init__(self, *args) + if args: + self.data = args[0] + else: + self.data = None + self._args = kwargs class InvalidP5IFile(DataError): - """Used to indicate that the specified location does not contain a - valid p5i-formatted file.""" - - def __str__(self): - if self.data: - return _("The provided p5i data is in an unrecognized " - "format or does not contain valid publisher " - "information: {0}").format(self.data) - return _("The provided p5i data is in an unrecognized format " - "or does not contain valid publisher information.") + """Used to indicate that the specified location does not contain a + valid p5i-formatted file.""" + + def __str__(self): + if self.data: + return _( + "The provided p5i data is in an unrecognized " + "format or does not contain valid publisher " + "information: {0}" + ).format(self.data) + return _( + "The provided p5i data is in an unrecognized 
format " + "or does not contain valid publisher information." + ) class InvalidP5SFile(DataError): - """Used to indicate that the specified location does not contain a - valid p5i-formatted file.""" - - def __str__(self): - if self.data: - return _("The provided p5s data is in an unrecognized " - "format or does not contain valid publisher " - "information: {0}").format(self.data) - return _("The provided p5s data is in an unrecognized format " - "or does not contain valid publisher information.") + """Used to indicate that the specified location does not contain a + valid p5i-formatted file.""" + + def __str__(self): + if self.data: + return _( + "The provided p5s data is in an unrecognized " + "format or does not contain valid publisher " + "information: {0}" + ).format(self.data) + return _( + "The provided p5s data is in an unrecognized format " + "or does not contain valid publisher information." + ) class UnsupportedP5IFile(DataError): - """Used to indicate that an attempt to read an unsupported version - of pkg(7) info file was attempted.""" + """Used to indicate that an attempt to read an unsupported version + of pkg(7) info file was attempted.""" - def __str__(self): - return _("Unsupported pkg(7) publisher information data " - "format.") + def __str__(self): + return _("Unsupported pkg(7) publisher information data " "format.") class UnsupportedP5SFile(DataError): - """Used to indicate that an attempt to read an unsupported version - of pkg(7) info file was attempted.""" + """Used to indicate that an attempt to read an unsupported version + of pkg(7) info file was attempted.""" - def __str__(self): - return _("Unsupported pkg(7) publisher and image information " - "data format.") + def __str__(self): + return _( + "Unsupported pkg(7) publisher and image information " "data format." + ) class UnsupportedP5SVersion(ApiException): - """Used to indicate that an attempt to read an unsupported version - of pkg(7) info file was attempted.""" + """Used to indicate that an attempt to read an unsupported version + of pkg(7) info file was attempted.""" - def __init__(self, v): - self.version = v + def __init__(self, v): + self.version = v - def __str__(self): - return _("{0} is not a supported version for creating a " - "syspub response.").format(self.version) + def __str__(self): + return _( + "{0} is not a supported version for creating a " "syspub response." + ).format(self.version) class TransportError(ApiException): - """Abstract exception class for all transport exceptions. - Specific transport exceptions should be implemented in the - transport code. Callers wishing to catch transport exceptions - should use this class. Subclasses must implement all methods - defined here that raise NotImplementedError.""" + """Abstract exception class for all transport exceptions. + Specific transport exceptions should be implemented in the + transport code. Callers wishing to catch transport exceptions + should use this class. 
Subclasses must implement all methods + defined here that raise NotImplementedError.""" - def __str__(self): - raise NotImplementedError() + def __str__(self): + raise NotImplementedError() - def _str_autofix(self): - return _str_autofix(self) + def _str_autofix(self): + return _str_autofix(self) class RetrievalError(ApiException): - """Used to indicate that a a requested resource could not be - retrieved.""" + """Used to indicate that a a requested resource could not be + retrieved.""" - def __init__(self, data, location=None): - ApiException.__init__(self) - self.data = data - self.location = location + def __init__(self, data, location=None): + ApiException.__init__(self) + self.data = data + self.location = location - def __str__(self): - if self.location: - return _("Error encountered while retrieving data from " - "'{location}':\n{data}").format( - location=self.location, data=self.data) - return _("Error encountered while retrieving data from: {0}").format( - self.data) + def __str__(self): + if self.location: + return _( + "Error encountered while retrieving data from " + "'{location}':\n{data}" + ).format(location=self.location, data=self.data) + return _("Error encountered while retrieving data from: {0}").format( + self.data + ) class InvalidResourceLocation(ApiException): - """Used to indicate that an invalid transport location was provided.""" + """Used to indicate that an invalid transport location was provided.""" + + def __init__(self, data): + ApiException.__init__(self) + self.data = data - def __init__(self, data): - ApiException.__init__(self) - self.data = data + def __str__(self): + return _("'{0}' is not a valid location.").format(self.data) - def __str__(self): - return _("'{0}' is not a valid location.").format(self.data) class BEException(ApiException): - def __init__(self): - ApiException.__init__(self) + def __init__(self): + ApiException.__init__(self) + class InvalidBENameException(BEException): - def __init__(self, be_name): - BEException.__init__(self) - self.be_name = be_name + def __init__(self, be_name): + BEException.__init__(self) + self.be_name = be_name + + def __str__(self): + return _("'{0}' is not a valid boot environment name.").format( + self.be_name + ) - def __str__(self): - return _("'{0}' is not a valid boot environment name.").format( - self.be_name) class DuplicateBEName(BEException): - """Used to indicate that there is an existing boot environment - with this name""" + """Used to indicate that there is an existing boot environment + with this name""" - def __init__(self, be_name): - BEException.__init__(self) - self.be_name = be_name + def __init__(self, be_name): + BEException.__init__(self) + self.be_name = be_name + + def __str__(self): + return _("The boot environment '{0}' already exists.").format( + self.be_name + ) - def __str__(self): - return _("The boot environment '{0}' already exists.").format( - self.be_name) class BENamingNotSupported(BEException): - def __init__(self, be_name): - BEException.__init__(self) - self.be_name = be_name + def __init__(self, be_name): + BEException.__init__(self) + self.be_name = be_name - def __str__(self): - return _("""\ + def __str__(self): + return _( + """\ Boot environment naming during package install is not supported on this -version of OpenSolaris. Please update without the --be-name option.""") +version of OpenSolaris. 
Please update without the --be-name option.""" + ) + class UnableToCopyBE(BEException): - def __str__(self): - return _("Unable to clone the current boot environment.") + def __str__(self): + return _("Unable to clone the current boot environment.") + class UnableToRenameBE(BEException): - def __init__(self, orig, dest): - BEException.__init__(self) - self.original_name = orig - self.destination_name = dest - - def __str__(self): - d = { - "orig": self.original_name, - "dest": self.destination_name - } - return _("""\ + def __init__(self, orig, dest): + BEException.__init__(self) + self.original_name = orig + self.destination_name = dest + + def __str__(self): + d = {"orig": self.original_name, "dest": self.destination_name} + return _( + """\ A problem occurred while attempting to rename the boot environment -currently named {orig} to {dest}.""").format(**d) +currently named {orig} to {dest}.""" + ).format(**d) + class UnableToMountBE(BEException): - def __init__(self, be_name, be_dir): - BEException.__init__(self) - self.name = be_name - self.mountpoint = be_dir + def __init__(self, be_name, be_dir): + BEException.__init__(self) + self.name = be_name + self.mountpoint = be_dir + + def __str__(self): + return _("Unable to mount {name} at {mt}").format( + name=self.name, mt=self.mountpoint + ) - def __str__(self): - return _("Unable to mount {name} at {mt}").format( - name=self.name, mt=self.mountpoint) class BENameGivenOnDeadBE(BEException): - def __init__(self, be_name): - BEException.__init__(self) - self.name = be_name + def __init__(self, be_name): + BEException.__init__(self) + self.name = be_name - def __str__(self): - return _("""\ + def __str__(self): + return _( + """\ Naming a boot environment when operating on a non-live image is -not allowed.""") +not allowed.""" + ) class UnrecognizedOptionsToInfo(ApiException): - def __init__(self, opts): - ApiException.__init__(self) - self._opts = opts + def __init__(self, opts): + ApiException.__init__(self) + self._opts = opts + + def __str__(self): + s = _("Info does not recognize the following options:") + for o in self._opts: + s += _(" '") + str(o) + _("'") + return s - def __str__(self): - s = _("Info does not recognize the following options:") - for o in self._opts: - s += _(" '") + str(o) + _("'") - return s class IncorrectIndexFileHash(ApiException): - """This is used when the index hash value doesn't match the hash of the - packages installed in the image.""" - pass + """This is used when the index hash value doesn't match the hash of the + packages installed in the image.""" + + pass class PublisherError(ApiException): - """Base exception class for all publisher exceptions.""" + """Base exception class for all publisher exceptions.""" - def __init__(self, *args, **kwargs): - ApiException.__init__(self, *args) - if args: - self.data = args[0] - else: - self.data = None - self._args = kwargs + def __init__(self, *args, **kwargs): + ApiException.__init__(self, *args) + if args: + self.data = args[0] + else: + self.data = None + self._args = kwargs - def __str__(self): - return str(self.data) + def __str__(self): + return str(self.data) class BadPublisherMetaRoot(PublisherError): - """Used to indicate an operation on the publisher's meta_root failed - because the meta_root is invalid.""" + """Used to indicate an operation on the publisher's meta_root failed + because the meta_root is invalid.""" - def __str__(self): - return _("Publisher meta_root '{root}' is invalid; unable " - "to complete operation: 
'{op}'.").format(root=self.data, - op=self._args.get("operation", None)) + def __str__(self): + return _( + "Publisher meta_root '{root}' is invalid; unable " + "to complete operation: '{op}'." + ).format(root=self.data, op=self._args.get("operation", None)) class BadPublisherAlias(PublisherError): - """Used to indicate that a publisher alias is not valid.""" + """Used to indicate that a publisher alias is not valid.""" - def __str__(self): - return _("'{0}' is not a valid publisher alias.").format( - self.data) + def __str__(self): + return _("'{0}' is not a valid publisher alias.").format(self.data) class BadPublisherPrefix(PublisherError): - """Used to indicate that a publisher name is not valid.""" + """Used to indicate that a publisher name is not valid.""" - def __str__(self): - return _("'{0}' is not a valid publisher name.").format( - self.data) + def __str__(self): + return _("'{0}' is not a valid publisher name.").format(self.data) class ReservedPublisherPrefix(PublisherError): - """Used to indicate that a publisher name is not valid.""" + """Used to indicate that a publisher name is not valid.""" - def __str__(self): - fmri = self._args["fmri"] - return _("'{pkg_pub}' is a reserved publisher and does not " - "contain the requested package: pkg:/{pkg_name}").format( - pkg_pub=fmri.publisher, pkg_name=fmri.pkg_name) + def __str__(self): + fmri = self._args["fmri"] + return _( + "'{pkg_pub}' is a reserved publisher and does not " + "contain the requested package: pkg:/{pkg_name}" + ).format(pkg_pub=fmri.publisher, pkg_name=fmri.pkg_name) class BadRepositoryAttributeValue(PublisherError): - """Used to indicate that the specified repository attribute value is - invalid.""" + """Used to indicate that the specified repository attribute value is + invalid.""" - def __str__(self): - return _("'{value}' is not a valid value for repository " - "attribute '{attribute}'.").format( - value=self._args["value"], attribute=self.data) + def __str__(self): + return _( + "'{value}' is not a valid value for repository " + "attribute '{attribute}'." + ).format(value=self._args["value"], attribute=self.data) class BadRepositoryCollectionType(PublisherError): - """Used to indicate that the specified repository collection type is - invalid.""" + """Used to indicate that the specified repository collection type is + invalid.""" - def __init__(self, *args, **kwargs): - PublisherError.__init__(self, *args, **kwargs) + def __init__(self, *args, **kwargs): + PublisherError.__init__(self, *args, **kwargs) - def __str__(self): - return _("'{0}' is not a valid repository collection type.").format( - self.data) + def __str__(self): + return _("'{0}' is not a valid repository collection type.").format( + self.data + ) class BadRepositoryURI(PublisherError): - """Used to indicate that a repository URI is not syntactically valid.""" + """Used to indicate that a repository URI is not syntactically valid.""" - def __str__(self): - return _("'{0}' is not a valid URI.").format(self.data) + def __str__(self): + return _("'{0}' is not a valid URI.").format(self.data) class BadRepositoryURIPriority(PublisherError): - """Used to indicate that the priority specified for a repository URI is - not valid.""" + """Used to indicate that the priority specified for a repository URI is + not valid.""" - def __str__(self): - return _("'{0}' is not a valid URI priority; integer value " - "expected.").format(self.data) + def __str__(self): + return _( + "'{0}' is not a valid URI priority; integer value " "expected." 
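Most of the publisher errors above follow one convention: PublisherError stores the first positional argument as self.data and the keyword arguments as self._args, and each subclass's __str__ pulls what it needs from those. A rough standalone analogue (DemoError is a hypothetical name, not a pkg class):

    class DemoError(Exception):
        def __init__(self, *args, **kwargs):
            super().__init__(*args)
            self.data = args[0] if args else None
            self._args = kwargs

        def __str__(self):
            return "'{value}' is not a valid value for attribute '{attribute}'.".format(
                value=self._args.get("value"), attribute=self.data
            )

    print(DemoError("collection_type", value="bogus"))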
+ ).format(self.data) class BadRepositoryURISortPolicy(PublisherError): - """Used to indicate that the specified repository URI sort policy is - invalid.""" + """Used to indicate that the specified repository URI sort policy is + invalid.""" - def __init__(self, *args, **kwargs): - PublisherError.__init__(self, *args, **kwargs) + def __init__(self, *args, **kwargs): + PublisherError.__init__(self, *args, **kwargs) - def __str__(self): - return _("'{0}' is not a valid repository URI sort policy.").format( - self.data) + def __str__(self): + return _("'{0}' is not a valid repository URI sort policy.").format( + self.data + ) class DisabledPublisher(PublisherError): - """Used to indicate that an attempt to use a disabled publisher occurred - during an operation.""" + """Used to indicate that an attempt to use a disabled publisher occurred + during an operation.""" - def __str__(self): - return _("Publisher '{0}' is disabled and cannot be used for " - "packaging operations.").format(self.data) + def __str__(self): + return _( + "Publisher '{0}' is disabled and cannot be used for " + "packaging operations." + ).format(self.data) class DuplicatePublisher(PublisherError): - """Used to indicate that a publisher with the same name or alias already - exists for an image.""" + """Used to indicate that a publisher with the same name or alias already + exists for an image.""" - def __str__(self): - return _("A publisher with the same name or alias as '{0}' " - "already exists.").format(self.data) + def __str__(self): + return _( + "A publisher with the same name or alias as '{0}' " + "already exists." + ).format(self.data) class DuplicateRepository(PublisherError): - """Used to indicate that a repository with the same origin uris - already exists for a publisher.""" + """Used to indicate that a repository with the same origin uris + already exists for a publisher.""" - def __str__(self): - return _("A repository with the same name or origin URIs " - "already exists for publisher '{0}'.").format(self.data) + def __str__(self): + return _( + "A repository with the same name or origin URIs " + "already exists for publisher '{0}'." + ).format(self.data) class DuplicateRepositoryMirror(PublisherError): - """Used to indicate that a repository URI is already in use by another - repository mirror.""" + """Used to indicate that a repository URI is already in use by another + repository mirror.""" - def __str__(self): - return _("Mirror '{0}' already exists for the specified " - "publisher.").format(self.data) + def __str__(self): + return _( + "Mirror '{0}' already exists for the specified " "publisher." + ).format(self.data) class DuplicateSyspubMirror(PublisherError): - """Used to indicate that a repository URI is already in use by the - system publisher.""" + """Used to indicate that a repository URI is already in use by the + system publisher.""" - def __str__(self): - return _("Mirror '{0}' is already accessible through the " - "system repository.").format(self.data) + def __str__(self): + return _( + "Mirror '{0}' is already accessible through the " + "system repository." 
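The multi-line messages above (BENamingNotSupported) and below (NoPublisherRepositories) open their triple-quoted literals with a trailing backslash, so the backslash-newline at the start of the literal is consumed and the message has no leading newline; moving such a literal inside _( ... ) as Black does changes nothing in the translated text. A standalone check:

    msg = """\
    Boot environment naming during package install is not supported on this
    version of OpenSolaris."""
    assert not msg.startswith("\n")
    print(msg)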
+ ).format(self.data) class DuplicateRepositoryOrigin(PublisherError): - """Used to indicate that a repository URI is already in use by another - repository origin.""" + """Used to indicate that a repository URI is already in use by another + repository origin.""" - def __str__(self): - return _("Origin '{0}' already exists for the specified " - "publisher.").format(self.data) + def __str__(self): + return _( + "Origin '{0}' already exists for the specified " "publisher." + ).format(self.data) class DuplicateSyspubOrigin(PublisherError): - """Used to indicate that a repository URI is already in use by the - system publisher.""" + """Used to indicate that a repository URI is already in use by the + system publisher.""" - def __str__(self): - return _("Origin '{0}' is already accessible through the " - "system repository.").format(self.data) + def __str__(self): + return _( + "Origin '{0}' is already accessible through the " + "system repository." + ).format(self.data) class RemoveSyspubOrigin(PublisherError): - """Used to indicate that a system publisher origin may not be - removed.""" + """Used to indicate that a system publisher origin may not be + removed.""" + + def __str__(self): + return _( + "Unable to remove origin '{0}' since it is provided " + "by the system repository." + ).format(self.data) - def __str__(self): - return _("Unable to remove origin '{0}' since it is provided " - "by the system repository.").format(self.data) class RemoveSyspubMirror(PublisherError): - """Used to indicate that a system publisher mirror may not be - removed.""" + """Used to indicate that a system publisher mirror may not be + removed.""" - def __str__(self): - return _("Unable to remove mirror '{0}' since it is provided " - "by the system repository.").format(self.data) + def __str__(self): + return _( + "Unable to remove mirror '{0}' since it is provided " + "by the system repository." + ).format(self.data) class NoPublisherRepositories(TransportError): - """Used to indicate that a Publisher has no repository information - configured and so transport operations cannot be performed.""" + """Used to indicate that a Publisher has no repository information + configured and so transport operations cannot be performed.""" - def __init__(self, prefix): - TransportError.__init__(self) - self.publisher = prefix + def __init__(self, prefix): + TransportError.__init__(self) + self.publisher = prefix - def __str__(self): - return _(""" + def __str__(self): + return _( + """ The requested operation requires that one or more package repositories are configured for publisher '{0}'. 
Use 'pkg set-publisher' to add new package repositories or restore previously -configured package repositories for publisher '{0}'.""").format(self.publisher) +configured package repositories for publisher '{0}'.""" + ).format(self.publisher) class MoveRelativeToSelf(PublisherError): - """Used to indicate an attempt to search a repo before or after itself""" + """Used to indicate an attempt to search a repo before or after itself""" - def __str__(self): - return _("Cannot search a repository before or after itself") + def __str__(self): + return _("Cannot search a repository before or after itself") class MoveRelativeToUnknown(PublisherError): - """Used to indicate an attempt to order a publisher relative to an - unknown publisher.""" + """Used to indicate an attempt to order a publisher relative to an + unknown publisher.""" - def __init__(self, unknown_pub): - self.__unknown_pub = unknown_pub + def __init__(self, unknown_pub): + self.__unknown_pub = unknown_pub - def __str__(self): - return _("{0} is an unknown publisher; no other publishers can " - "be ordered relative to it.").format(self.__unknown_pub) + def __str__(self): + return _( + "{0} is an unknown publisher; no other publishers can " + "be ordered relative to it." + ).format(self.__unknown_pub) class SelectedRepositoryRemoval(PublisherError): - """Used to indicate that an attempt to remove the selected repository - for a publisher was made.""" + """Used to indicate that an attempt to remove the selected repository + for a publisher was made.""" - def __str__(self): - return _("Cannot remove the selected repository for a " - "publisher.") + def __str__(self): + return _("Cannot remove the selected repository for a " "publisher.") class UnknownLegalURI(PublisherError): - """Used to indicate that no matching legal URI could be found using the - provided criteria.""" + """Used to indicate that no matching legal URI could be found using the + provided criteria.""" - def __str__(self): - return _("Unknown legal URI '{0}'.").format(self.data) + def __str__(self): + return _("Unknown legal URI '{0}'.").format(self.data) class UnknownPublisher(PublisherError): - """Used to indicate that no matching publisher could be found using the - provided criteria.""" + """Used to indicate that no matching publisher could be found using the + provided criteria.""" - def __str__(self): - return _("Unknown publisher '{0}'.").format(self.data) + def __str__(self): + return _("Unknown publisher '{0}'.").format(self.data) class UnknownRepositoryPublishers(PublisherError): - """Used to indicate that one or more publisher prefixes are unknown by - the specified repository.""" - - def __init__(self, known=EmptyI, unknown=EmptyI, location=None, - origins=EmptyI): - ApiException.__init__(self) - self.known = known - self.location = location - self.origins = origins - self.unknown = unknown - - def __str__(self): - if self.location: - return _("The repository at {location} does not " - "contain package data for {unknown}; only " - "{known}.\n\nThis is either because the " - "repository location is not valid, or because the " - "provided publisher does not match those known by " - "the repository.").format( - unknown=", ".join(self.unknown), - location=self.location, - known=", ".join(self.known)) - if self.origins: - return _("One or more of the repository origin(s) " - "listed below contains package data for " - "{known}; not {unknown}:\n\n{origins}\n\n" - "This is either because one of the repository " - "origins is not valid for this publisher, or " - 
"because the list of known publishers retrieved " - "from the repository origin does not match the " - "client.").format(unknown=", ".join(self.unknown), - known=", ".join(self.known), - origins="\n".join(str(o) for o in self.origins)) - return _("The specified publisher repository does not " - "contain any package data for {unknown}; only " - "{known}.").format(unknown=", ".join(self.unknown), - known=", ".join(self.known)) + """Used to indicate that one or more publisher prefixes are unknown by + the specified repository.""" + + def __init__( + self, known=EmptyI, unknown=EmptyI, location=None, origins=EmptyI + ): + ApiException.__init__(self) + self.known = known + self.location = location + self.origins = origins + self.unknown = unknown + + def __str__(self): + if self.location: + return _( + "The repository at {location} does not " + "contain package data for {unknown}; only " + "{known}.\n\nThis is either because the " + "repository location is not valid, or because the " + "provided publisher does not match those known by " + "the repository." + ).format( + unknown=", ".join(self.unknown), + location=self.location, + known=", ".join(self.known), + ) + if self.origins: + return _( + "One or more of the repository origin(s) " + "listed below contains package data for " + "{known}; not {unknown}:\n\n{origins}\n\n" + "This is either because one of the repository " + "origins is not valid for this publisher, or " + "because the list of known publishers retrieved " + "from the repository origin does not match the " + "client." + ).format( + unknown=", ".join(self.unknown), + known=", ".join(self.known), + origins="\n".join(str(o) for o in self.origins), + ) + return _( + "The specified publisher repository does not " + "contain any package data for {unknown}; only " + "{known}." 
+ ).format(unknown=", ".join(self.unknown), known=", ".join(self.known)) class UnknownRelatedURI(PublisherError): - """Used to indicate that no matching related URI could be found using - the provided criteria.""" + """Used to indicate that no matching related URI could be found using + the provided criteria.""" - def __str__(self): - return _("Unknown related URI '{0}'.").format(self.data) + def __str__(self): + return _("Unknown related URI '{0}'.").format(self.data) class UnknownRepository(PublisherError): - """Used to indicate that no matching repository could be found using the - provided criteria.""" + """Used to indicate that no matching repository could be found using the + provided criteria.""" - def __str__(self): - return _("Unknown repository '{0}'.").format(self.data) + def __str__(self): + return _("Unknown repository '{0}'.").format(self.data) class UnknownRepositoryMirror(PublisherError): - """Used to indicate that a repository URI could not be found in the - list of repository mirrors.""" + """Used to indicate that a repository URI could not be found in the + list of repository mirrors.""" + + def __str__(self): + return _("Unknown repository mirror '{0}'.").format(self.data) - def __str__(self): - return _("Unknown repository mirror '{0}'.").format(self.data) class UnsupportedRepositoryOperation(TransportError): - """The publisher has no active repositories that support the - requested operation.""" + """The publisher has no active repositories that support the + requested operation.""" - def __init__(self, pub, operation): - ApiException.__init__(self) - self.data = None - self.kwargs = None - self.pub = pub - self.op = operation + def __init__(self, pub, operation): + ApiException.__init__(self) + self.data = None + self.kwargs = None + self.pub = pub + self.op = operation - def __str__(self): - return _("Publisher '{pub}' has no repositories that support " - "the '{op}' operation.").format(**self.__dict__) + def __str__(self): + return _( + "Publisher '{pub}' has no repositories that support " + "the '{op}' operation." + ).format(**self.__dict__) class RepoPubConfigUnavailable(PublisherError): - """Used to indicate that the specified repository does not provide - publisher configuration information.""" - - def __init__(self, location=None, pub=None): - ApiException.__init__(self) - self.location = location - self.pub = pub - - def __str__(self): - if not self.location and not self.pub: - return _("The specified package repository does not " - "provide publisher configuration information.") - if self.location: - return _("The package repository at {0} does not " - "provide publisher configuration information or " - "the information provided is incomplete.").format( - self.location) - return _("One of the package repository origins for {0} does " - "not provide publisher configuration information or the " - "information provided is incomplete.").format(self.pub) + """Used to indicate that the specified repository does not provide + publisher configuration information.""" + + def __init__(self, location=None, pub=None): + ApiException.__init__(self) + self.location = location + self.pub = pub + + def __str__(self): + if not self.location and not self.pub: + return _( + "The specified package repository does not " + "provide publisher configuration information." + ) + if self.location: + return _( + "The package repository at {0} does not " + "provide publisher configuration information or " + "the information provided is incomplete." 
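UnsupportedRepositoryOperation above expands the instance's attribute dictionary directly into the message placeholders with .format(**self.__dict__). A standalone sketch of the same idiom (Demo is an illustrative name):

    class Demo:
        def __init__(self, pub, op):
            self.pub = pub
            self.op = op

        def __str__(self):
            # attribute names double as the format placeholders
            return (
                "Publisher '{pub}' has no repositories that support "
                "the '{op}' operation.".format(**self.__dict__)
            )

    print(Demo("openindiana.org", "search"))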
+ ).format(self.location) + return _( + "One of the package repository origins for {0} does " + "not provide publisher configuration information or the " + "information provided is incomplete." + ).format(self.pub) class UnknownRepositoryOrigin(PublisherError): - """Used to indicate that a repository URI could not be found in the - list of repository origins.""" + """Used to indicate that a repository URI could not be found in the + list of repository origins.""" - def __str__(self): - return _("Unknown repository origin '{0}'").format(self.data) + def __str__(self): + return _("Unknown repository origin '{0}'").format(self.data) class UnsupportedRepositoryURI(PublisherError): - """Used to indicate that the specified repository URI uses an - unsupported scheme.""" - - def __init__(self, uris=[]): - if isinstance(uris, six.string_types): - uris = [uris] - - assert isinstance(uris, (list, tuple, set)) - - self.uris = uris - - def __str__(self): - illegals = [] - - for u in self.uris: - assert isinstance(u, six.string_types) - scheme = urlsplit(u, - allow_fragments=0)[0] - illegals.append((u, scheme)) - - if len(illegals) > 1: - msg = _("The follwing URIs use unsupported " - "schemes. Supported schemes are " - "file://, http://, and https://.") - for i, s in illegals: - msg += _("\n {uri} (scheme: " - "{scheme})").format(uri=i, scheme=s) - return msg - elif len(illegals) == 1: - i, s = illegals[0] - return _("The URI '{uri}' uses the unsupported " - "scheme '{scheme}'. Supported schemes are " - "file://, http://, and https://.").format( - uri=i, scheme=s) - return _("The specified URI uses an unsupported scheme." - " Supported schemes are: file://, http://, and " - "https://.") + """Used to indicate that the specified repository URI uses an + unsupported scheme.""" + + def __init__(self, uris=[]): + if isinstance(uris, six.string_types): + uris = [uris] + + assert isinstance(uris, (list, tuple, set)) + + self.uris = uris + + def __str__(self): + illegals = [] + + for u in self.uris: + assert isinstance(u, six.string_types) + scheme = urlsplit(u, allow_fragments=0)[0] + illegals.append((u, scheme)) + + if len(illegals) > 1: + msg = _( + "The follwing URIs use unsupported " + "schemes. Supported schemes are " + "file://, http://, and https://." + ) + for i, s in illegals: + msg += _("\n {uri} (scheme: " "{scheme})").format( + uri=i, scheme=s + ) + return msg + elif len(illegals) == 1: + i, s = illegals[0] + return _( + "The URI '{uri}' uses the unsupported " + "scheme '{scheme}'. Supported schemes are " + "file://, http://, and https://." + ).format(uri=i, scheme=s) + return _( + "The specified URI uses an unsupported scheme." + " Supported schemes are: file://, http://, and " + "https://." 
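UnsupportedRepositoryURI above classifies each URI by the scheme returned from urllib.parse.urlsplit. A condensed, runnable sketch of that check (the constant and function names here are illustrative, not pkg's):

    from urllib.parse import urlsplit

    SUPPORTED = {"file", "http", "https"}

    def unsupported(uris):
        # keep (uri, scheme) pairs whose scheme is not in the supported set
        pairs = [(u, urlsplit(u, allow_fragments=False)[0]) for u in uris]
        return [(u, scheme) for u, scheme in pairs if scheme not in SUPPORTED]

    print(unsupported(["https://pkg.example.org", "ftp://mirror.example.org"]))
    # [('ftp://mirror.example.org', 'ftp')]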
+ ) class UnsupportedRepositoryURIAttribute(PublisherError): - """Used to indicate that the specified repository URI attribute is not - supported for the URI's scheme.""" + """Used to indicate that the specified repository URI attribute is not + supported for the URI's scheme.""" - def __str__(self): - return _("'{attr}' is not supported for '{scheme}'.").format( - attr=self.data, scheme=self._args["scheme"]) + def __str__(self): + return _("'{attr}' is not supported for '{scheme}'.").format( + attr=self.data, scheme=self._args["scheme"] + ) class UnsupportedProxyURI(PublisherError): - """Used to indicate that the specified proxy URI is unsupported.""" - - def __str__(self): - if self.data: - scheme = urlsplit(self.data, - allow_fragments=0)[0] - return _("The proxy URI '{uri}' uses the unsupported " - "scheme '{scheme}'. Currently the only supported " - "scheme is http://.").format( - uri=self.data, scheme=scheme) - return _("The specified proxy URI uses an unsupported scheme." - " Currently the only supported scheme is: http://.") + """Used to indicate that the specified proxy URI is unsupported.""" + + def __str__(self): + if self.data: + scheme = urlsplit(self.data, allow_fragments=0)[0] + return _( + "The proxy URI '{uri}' uses the unsupported " + "scheme '{scheme}'. Currently the only supported " + "scheme is http://." + ).format(uri=self.data, scheme=scheme) + return _( + "The specified proxy URI uses an unsupported scheme." + " Currently the only supported scheme is: http://." + ) + class BadProxyURI(PublisherError): - """Used to indicate that a proxy URI is not syntactically valid.""" + """Used to indicate that a proxy URI is not syntactically valid.""" - def __str__(self): - return _("'{0}' is not a valid URI.").format(self.data) + def __str__(self): + return _("'{0}' is not a valid URI.").format(self.data) class UnknownSysrepoConfiguration(ApiException): - """Used when a pkg client needs to communicate with the system - repository but can't find the configuration for it.""" + """Used when a pkg client needs to communicate with the system + repository but can't find the configuration for it.""" - def __str__(self): - return _("""\ + def __str__(self): + return _( + """\ pkg is configured to use the system repository (via the use-system-repo property) but it could not get the host and port from svc:/application/pkg/zones-proxy-client nor svc:/application/pkg/system-repository, and the PKG_SYSREPO_URL environment variable was not set. Please try enabling one of those services or setting the PKG_SYSREPO_URL environment variable. -""") +""" + ) class ModifyingSyspubException(ApiException): - """This exception is raised when a user attempts to modify a system - publisher.""" + """This exception is raised when a user attempts to modify a system + publisher.""" - def __init__(self, s): - self.s = s + def __init__(self, s): + self.s = s - def __str__(self): - return self.s + def __str__(self): + return self.s class SigningException(ApiException): - """The base class for exceptions related to manifest signing.""" - - def __init__(self, pfmri=None, sig=None): - self.pfmri = pfmri - self.sig = sig - - # This string method is used by subclasses to fill in the details - # about the package and signature involved. 
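The comment above describes the pattern used throughout the signing exceptions: a subclass builds its own message and then appends the package/signature details produced by the base class's __str__. A minimal standalone analogue (BaseSigError and LeafError are illustrative names, not pkg classes):

    class BaseSigError(Exception):
        def __init__(self, pfmri=None):
            self.pfmri = pfmri

        def __str__(self):
            if self.pfmri:
                return "The package involved is {0}".format(self.pfmri)
            return ""

    class LeafError(BaseSigError):
        def __str__(self):
            # subclass message plus the shared details from the base class
            return "Certificate check failed.\n" + BaseSigError.__str__(self)

    print(LeafError("pkg://openindiana.org/web/server@1.0"))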
- def __str__(self): - if self.pfmri: - if self.sig: - return _("The relevant signature action is " - "found in {pfmri} and has a hash of " - "{hsh}").format( - pfmri=self.pfmri, hsh=self.sig.hash) - return _("The package involved is {0}").format( - self.pfmri) - if self.sig: - return _("The relevant signature action's value " - "attribute is {0}").format(self.sig.attrs["value"]) - return "" + """The base class for exceptions related to manifest signing.""" + + def __init__(self, pfmri=None, sig=None): + self.pfmri = pfmri + self.sig = sig + + # This string method is used by subclasses to fill in the details + # about the package and signature involved. + def __str__(self): + if self.pfmri: + if self.sig: + return _( + "The relevant signature action is " + "found in {pfmri} and has a hash of " + "{hsh}" + ).format(pfmri=self.pfmri, hsh=self.sig.hash) + return _("The package involved is {0}").format(self.pfmri) + if self.sig: + return _( + "The relevant signature action's value " "attribute is {0}" + ).format(self.sig.attrs["value"]) + return "" class BadFileFormat(SigningException): - """Exception used when a key, certificate or CRL file is not in a - recognized format.""" + """Exception used when a key, certificate or CRL file is not in a + recognized format.""" - def __init__(self, txt): - self.txt = txt + def __init__(self, txt): + self.txt = txt - def __str__(self): - return self.txt + def __str__(self): + return self.txt class UnsupportedSignatureVersion(SigningException): - """Exception used when a signature reports a version which this version - of pkg(7) doesn't support.""" + """Exception used when a signature reports a version which this version + of pkg(7) doesn't support.""" - def __init__(self, version, *args, **kwargs): - SigningException.__init__(self, *args, **kwargs) - self.version = version + def __init__(self, version, *args, **kwargs): + SigningException.__init__(self, *args, **kwargs) + self.version = version - def __str__(self): - return _("The signature action {act} was made using a " - "version ({ver}) this version of pkg(7) doesn't " - "understand.").format(act=self.sig, ver=self.version) + def __str__(self): + return _( + "The signature action {act} was made using a " + "version ({ver}) this version of pkg(7) doesn't " + "understand." + ).format(act=self.sig, ver=self.version) class CertificateException(SigningException): - """Base class for exceptions encountered while establishing the chain - of trust.""" + """Base class for exceptions encountered while establishing the chain + of trust.""" - def __init__(self, cert, pfmri=None): - SigningException.__init__(self, pfmri) - self.cert = cert + def __init__(self, cert, pfmri=None): + SigningException.__init__(self, pfmri) + self.cert = cert class ModifiedCertificateException(CertificateException): - """Exception used when a certificate does not match its expected hash - value.""" + """Exception used when a certificate does not match its expected hash + value.""" - def __init__(self, cert, path, pfmri=None): - CertificateException.__init__(self, cert, pfmri) - self.path = path + def __init__(self, cert, path, pfmri=None): + CertificateException.__init__(self, cert, pfmri) + self.path = path - def __str__(self): - return _("Certificate {0} has been modified on disk. Its hash " - "value is not what was expected.").format(self.path) + def __str__(self): + return _( + "Certificate {0} has been modified on disk. Its hash " + "value is not what was expected." 
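ModifiedCertificateException above is raised when a certificate's on-disk content no longer matches the digest recorded for it. The sketch below shows the general shape of such a check; it is a hedged illustration, not pkg's implementation, and the digest algorithm is an assumption:

    import hashlib
    import tempfile

    def file_digest(path):
        with open(path, "rb") as f:
            return hashlib.sha256(f.read()).hexdigest()

    with tempfile.NamedTemporaryFile("wb", delete=False, suffix=".pem") as tmp:
        tmp.write(b"-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n")
        path = tmp.name

    expected = file_digest(path)          # digest recorded when the cert was stored
    assert file_digest(path) == expected  # a mismatch would mean the file changed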
+ ).format(self.path) class UntrustedSelfSignedCert(CertificateException): - """Exception used when a chain of trust is rooted in an untrusted - self-signed certificate.""" - - def __str__(self): - return _("The signing certificate chain is rooted in a " + \ - "certificate not present in the trust-anchor-directory.\n" + \ - "See the image properties section of pkg(1).\n") + \ - "Certificate Subject:" + \ - self.cert.subject.rfc4514_string() + "\n" + \ - CertificateException.__str__(self) + """Exception used when a chain of trust is rooted in an untrusted + self-signed certificate.""" + + def __str__(self): + return ( + _( + "The signing certificate chain is rooted in a " + + "certificate not present in the trust-anchor-directory.\n" + + "See the image properties section of pkg(1).\n" + ) + + "Certificate Subject:" + + self.cert.subject.rfc4514_string() + + "\n" + + CertificateException.__str__(self) + ) class BrokenChain(CertificateException): - """Exception used when a chain of trust can not be established between - the leaf certificate and a trust anchor.""" - - def __init__(self, cert, cert_exceptions, *args, **kwargs): - CertificateException.__init__(self, cert, *args, **kwargs) - self.ext_exs = cert_exceptions - - def __str__(self): - s = "" - if self.ext_exs: - s = _("The following problems were encountered:\n") + \ - "\n".join([str(e) for e in self.ext_exs]) - return _("The certificate which issued this " - "certificate: {subj} could not be found. The issuer " - "is: {issuer}\n").format(subj="/".join("{0}={1}".format( - sub.oid._name, sub.value) for sub in self.cert.subject), - issuer="/".join("{0}={1}".format(i.oid._name, i.value) - for i in self.cert.issuer)) + s + "\n" + \ - CertificateException.__str__(self) + """Exception used when a chain of trust can not be established between + the leaf certificate and a trust anchor.""" + + def __init__(self, cert, cert_exceptions, *args, **kwargs): + CertificateException.__init__(self, cert, *args, **kwargs) + self.ext_exs = cert_exceptions + + def __str__(self): + s = "" + if self.ext_exs: + s = _("The following problems were encountered:\n") + "\n".join( + [str(e) for e in self.ext_exs] + ) + return ( + _( + "The certificate which issued this " + "certificate: {subj} could not be found. The issuer " + "is: {issuer}\n" + ).format( + subj="/".join( + "{0}={1}".format(sub.oid._name, sub.value) + for sub in self.cert.subject + ), + issuer="/".join( + "{0}={1}".format(i.oid._name, i.value) + for i in self.cert.issuer + ), + ) + + s + + "\n" + + CertificateException.__str__(self) + ) class RevokedCertificate(CertificateException): - """Exception used when a chain of trust contains a revoked certificate. 
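BrokenChain above (and RevokedCertificate just below) renders a certificate subject by joining its name attributes as name=value pairs. The real objects come from the cryptography package; the stand-in namedtuples below exist only so the formatting idiom can run on its own:

    from collections import namedtuple

    Oid = namedtuple("Oid", "_name")
    Attr = namedtuple("Attr", "oid value")

    subject = [Attr(Oid("CN"), "ips-signing"), Attr(Oid("O"), "OpenIndiana")]
    print("/".join("{0}={1}".format(a.oid._name, a.value) for a in subject))
    # CN=ips-signing/O=OpenIndiana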
-    """
-
-    def __init__(self, cert, reason, *args, **kwargs):
-        CertificateException.__init__(self, cert, *args, **kwargs)
-        self.reason = reason
-
-    def __str__(self):
-        return _("This certificate was revoked:{cert} for this "
-            "reason:\n{reason}\n").format(cert="/".join("{0}={1}".format(
-            s.oid._name, s.value) for s in self.cert.subject),
-            reason=self.reason) + CertificateException.__str__(self)
+    """Exception used when a chain of trust contains a revoked certificate."""
+
+    def __init__(self, cert, reason, *args, **kwargs):
+        CertificateException.__init__(self, cert, *args, **kwargs)
+        self.reason = reason
+
+    def __str__(self):
+        return (
+            _(
+                "This certificate was revoked:{cert} for this "
+                "reason:\n{reason}\n"
+            ).format(
+                cert="/".join(
+                    "{0}={1}".format(s.oid._name, s.value)
+                    for s in self.cert.subject
+                ),
+                reason=self.reason,
+            )
+            + CertificateException.__str__(self)
+        )


class UnverifiedSignature(SigningException):
-    """Exception used when a signature could not be verified by the
-    expected certificate."""
-
-    def __init__(self, sig, reason, pfmri=None):
-        SigningException.__init__(self, pfmri)
-        self.sig = sig
-        self.reason = reason
-
-    def __str__(self):
-        if self.pfmri:
-            return _("A signature in {pfmri} could not be "
-                "verified for "
-                "this reason:\n{reason}\nThe signature's hash is "
-                "{hash}").format(pfmri=self.pfmri,
-                reason=self.reason,
-                hash=self.sig.hash)
-        return _("The signature with this signature value:\n"
-            "{sigval}\n could not be verified for this reason:\n"
-            "{reason}\n").format(reason=self.reason,
-            sigval=self.sig.attrs["value"])
+    """Exception used when a signature could not be verified by the
+    expected certificate."""
+
+    def __init__(self, sig, reason, pfmri=None):
+        SigningException.__init__(self, pfmri)
+        self.sig = sig
+        self.reason = reason
+
+    def __str__(self):
+        if self.pfmri:
+            return _(
+                "A signature in {pfmri} could not be "
+                "verified for "
+                "this reason:\n{reason}\nThe signature's hash is "
+                "{hash}"
+            ).format(pfmri=self.pfmri, reason=self.reason, hash=self.sig.hash)
+        return _(
+            "The signature with this signature value:\n"
+            "{sigval}\n could not be verified for this reason:\n"
+            "{reason}\n"
+        ).format(reason=self.reason, sigval=self.sig.attrs["value"])


class RequiredSignaturePolicyException(SigningException):
-    """Exception used when signatures were required but none were found."""
-
-    def __init__(self, pub, pfmri=None):
-        SigningException.__init__(self, pfmri)
-        self.pub = pub
-
-    def __str__(self):
-        pub_str = self.pub.prefix
-        if self.pfmri:
-            return _("The policy for {pub_str} requires "
-                "signatures to be present but no signature was "
-                "found in {fmri_str}.").format(
-                pub_str=pub_str, fmri_str=self.pfmri)
-        return _("The policy for {pub_str} requires signatures to be "
-            "present but no signature was found.").format(
-            pub_str=pub_str)
+    """Exception used when signatures were required but none were found."""
+
+    def __init__(self, pub, pfmri=None):
+        SigningException.__init__(self, pfmri)
+        self.pub = pub
+
+    def __str__(self):
+        pub_str = self.pub.prefix
+        if self.pfmri:
+            return _(
+                "The policy for {pub_str} requires "
+                "signatures to be present but no signature was "
+                "found in {fmri_str}."
+            ).format(pub_str=pub_str, fmri_str=self.pfmri)
+        return _(
+            "The policy for {pub_str} requires signatures to be "
+            "present but no signature was found."
+ ).format(pub_str=pub_str) class MissingRequiredNamesException(SigningException): - """Exception used when a signature policy required names to be seen - which weren't seen.""" - - def __init__(self, pub, missing_names, pfmri=None): - SigningException.__init__(self, pfmri) - self.pub = pub - self.missing_names = missing_names - - def __str__(self): - pub_str = self.pub.prefix - if self.pfmri: - return _("The policy for {pub_str} requires certain " - "CNs to be seen in a chain of trust. The following " - "required names couldn't be found for this " - "package:{fmri_str}.\n{missing}").format( - pub_str=pub_str, fmri_str=self.pfmri, - missing="\n".join(self.missing_names)) - return _("The policy for {pub_str} requires certain CNs to " - "be seen in a chain of trust. The following required names " - "couldn't be found.\n{missing}").format(pub_str=pub_str, - missing="\n".join(self.missing_names)) + """Exception used when a signature policy required names to be seen + which weren't seen.""" + + def __init__(self, pub, missing_names, pfmri=None): + SigningException.__init__(self, pfmri) + self.pub = pub + self.missing_names = missing_names + + def __str__(self): + pub_str = self.pub.prefix + if self.pfmri: + return _( + "The policy for {pub_str} requires certain " + "CNs to be seen in a chain of trust. The following " + "required names couldn't be found for this " + "package:{fmri_str}.\n{missing}" + ).format( + pub_str=pub_str, + fmri_str=self.pfmri, + missing="\n".join(self.missing_names), + ) + return _( + "The policy for {pub_str} requires certain CNs to " + "be seen in a chain of trust. The following required names " + "couldn't be found.\n{missing}" + ).format(pub_str=pub_str, missing="\n".join(self.missing_names)) + class UnsupportedCriticalExtension(SigningException): - """Exception used when a certificate in the chain of trust uses a - critical extension pkg doesn't understand.""" - - def __init__(self, cert, ext): - SigningException.__init__(self) - self.cert = cert - self.ext = ext - - def __str__(self): - return _("The certificate whose subject is {cert} could not " - "be verified because it uses an unsupported critical " - "extension.\nExtension name: {name}\nExtension " - "value: {val}").format(cert="/".join("{0}={1}".format( - s.oid._name, s.value) for s in self.cert.subject), - name=self.ext.oid._name, val=self.ext.value) + """Exception used when a certificate in the chain of trust uses a + critical extension pkg doesn't understand.""" + + def __init__(self, cert, ext): + SigningException.__init__(self) + self.cert = cert + self.ext = ext + + def __str__(self): + return _( + "The certificate whose subject is {cert} could not " + "be verified because it uses an unsupported critical " + "extension.\nExtension name: {name}\nExtension " + "value: {val}" + ).format( + cert="/".join( + "{0}={1}".format(s.oid._name, s.value) + for s in self.cert.subject + ), + name=self.ext.oid._name, + val=self.ext.value, + ) + class UnsupportedExtensionValue(SigningException): - """Exception used when a certificate in the chain of trust has an - extension with a value pkg doesn't understand.""" - - def __init__(self, cert, ext, val, bad_val=None): - SigningException.__init__(self) - self.cert = cert - self.ext = ext - self.val = val - self.bad_val = bad_val - - def __str__(self): - s = _("The certificate whose subject is {cert} could not be " - "verified because it has an extension with a value that " - "pkg(7) does not understand." 
- "\nExtension name: {name}\nExtension value: {val}").format( - cert="/".join("{0}={1}".format( - s.oid._name, s.value) for s in self.cert.subject), - name=self.ext.oid._name, val=self.val) - if self.bad_val: - s += _("\nProblematic value: {0}").format(self.bad_val) - return s + """Exception used when a certificate in the chain of trust has an + extension with a value pkg doesn't understand.""" + + def __init__(self, cert, ext, val, bad_val=None): + SigningException.__init__(self) + self.cert = cert + self.ext = ext + self.val = val + self.bad_val = bad_val + + def __str__(self): + s = _( + "The certificate whose subject is {cert} could not be " + "verified because it has an extension with a value that " + "pkg(7) does not understand." + "\nExtension name: {name}\nExtension value: {val}" + ).format( + cert="/".join( + "{0}={1}".format(s.oid._name, s.value) + for s in self.cert.subject + ), + name=self.ext.oid._name, + val=self.val, + ) + if self.bad_val: + s += _("\nProblematic value: {0}").format(self.bad_val) + return s + class InvalidCertificateExtensions(SigningException): - """Exception used when a certificate in the chain of trust has - invalid extensions.""" - - def __init__(self, cert, error): - SigningException.__init__(self) - self.cert = cert - self.error = error - - def __str__(self): - s = _("The certificate whose subject is {cert} could not be " - "verified because it has invalid extensions:\n{error}" - ).format(cert="/".join("{0}={1}".format( - s.oid._name, s.value) for s in self.cert.subject), - error=self.error) - return s + """Exception used when a certificate in the chain of trust has + invalid extensions.""" + + def __init__(self, cert, error): + SigningException.__init__(self) + self.cert = cert + self.error = error + + def __str__(self): + s = _( + "The certificate whose subject is {cert} could not be " + "verified because it has invalid extensions:\n{error}" + ).format( + cert="/".join( + "{0}={1}".format(s.oid._name, s.value) + for s in self.cert.subject + ), + error=self.error, + ) + return s + class InappropriateCertificateUse(SigningException): - """Exception used when a certificate in the chain of trust has been used - inappropriately. An example would be a certificate which was only - supposed to be used to sign code being used to sign other certificates. - """ - - def __init__(self, cert, ext, use, val): - SigningException.__init__(self) - self.cert = cert - self.ext = ext - self.use = use - self.val = val - - def __str__(self): - return _("The certificate whose subject is {cert} could not " - "be verified because it has been used inappropriately. " - "The way it is used means that the value for extension " - "{name} must include '{use}' but the value was " - "'{val}'.").format(cert="/".join("{0}={1}".format( - s.oid._name, s.value) for s in self.cert.subject), - use=self.use, name=self.ext.oid._name, - val=self.val) + """Exception used when a certificate in the chain of trust has been used + inappropriately. An example would be a certificate which was only + supposed to be used to sign code being used to sign other certificates. + """ + + def __init__(self, cert, ext, use, val): + SigningException.__init__(self) + self.cert = cert + self.ext = ext + self.use = use + self.val = val + + def __str__(self): + return _( + "The certificate whose subject is {cert} could not " + "be verified because it has been used inappropriately. " + "The way it is used means that the value for extension " + "{name} must include '{use}' but the value was " + "'{val}'." 
+ ).format( + cert="/".join( + "{0}={1}".format(s.oid._name, s.value) + for s in self.cert.subject + ), + use=self.use, + name=self.ext.oid._name, + val=self.val, + ) + class PathlenTooShort(InappropriateCertificateUse): - """Exception used when a certificate in the chain of trust has been used - inappropriately. An example would be a certificate which was only - supposed to be used to sign code being used to sign other certificates. - """ - - def __init__(self, cert, actual_length, cert_length): - SigningException.__init__(self) - self.cert = cert - self.al = actual_length - self.cl = cert_length - - def __str__(self): - return _("The certificate whose subject is {cert} could not " - "be verified because it has been used inappropriately. " - "There can only be {cl} certificates between this " - "certificate and the leaf certificate. There are {al} " - "certificates between this certificate and the leaf in " - "this chain.").format( - cert="/".join("{0}={1}".format( - s.oid._name, s.value) for s in self.cert.subject), - al=self.al, - cl=self.cl - ) + """Exception used when a certificate in the chain of trust has been used + inappropriately. An example would be a certificate which was only + supposed to be used to sign code being used to sign other certificates. + """ + + def __init__(self, cert, actual_length, cert_length): + SigningException.__init__(self) + self.cert = cert + self.al = actual_length + self.cl = cert_length + + def __str__(self): + return _( + "The certificate whose subject is {cert} could not " + "be verified because it has been used inappropriately. " + "There can only be {cl} certificates between this " + "certificate and the leaf certificate. There are {al} " + "certificates between this certificate and the leaf in " + "this chain." + ).format( + cert="/".join( + "{0}={1}".format(s.oid._name, s.value) + for s in self.cert.subject + ), + al=self.al, + cl=self.cl, + ) class AlmostIdentical(ApiException): - """Exception used when a package already has a signature action which is - nearly identical to the one being added but differs on some - attributes.""" - - def __init__(self, hsh, algorithm, version, pkg=None): - self.hsh = hsh - self.algorithm = algorithm - self.version = version - self.pkg = pkg - - def __str__(self): - s = _("The signature to be added to the package has the same " - "hash ({hash}), algorithm ({algorithm}), and " - "version ({version}) as an existing signature, but " - "doesn't match the signature exactly. For this signature " - "to be added, the existing signature must be removed.").format( - hash=self.hsh, - algorithm=self.algorithm, - version=self.version - ) - if self.pkg: - s += _("The package being signed was {pkg}").format( - pkg=self.pkg) - return s + """Exception used when a package already has a signature action which is + nearly identical to the one being added but differs on some + attributes.""" + + def __init__(self, hsh, algorithm, version, pkg=None): + self.hsh = hsh + self.algorithm = algorithm + self.version = version + self.pkg = pkg + + def __str__(self): + s = _( + "The signature to be added to the package has the same " + "hash ({hash}), algorithm ({algorithm}), and " + "version ({version}) as an existing signature, but " + "doesn't match the signature exactly. For this signature " + "to be added, the existing signature must be removed." 
+ ).format(hash=self.hsh, algorithm=self.algorithm, version=self.version) + if self.pkg: + s += _("The package being signed was {pkg}").format(pkg=self.pkg) + return s class DuplicateSignaturesAlreadyExist(ApiException): - """Exception used when a package already has a signature action which is - nearly identical to the one being added but differs on some - attributes.""" + """Exception used when a package already has a signature action which is + nearly identical to the one being added but differs on some + attributes.""" - def __init__(self, pfmri): - self.pfmri = pfmri + def __init__(self, pfmri): + self.pfmri = pfmri - def __str__(self): - return _("{0} could not be signed because it already has two " - "copies of this signature in it. One of those signature " - "actions must be removed before the package is given to " - "users.").format(self.pfmri) + def __str__(self): + return _( + "{0} could not be signed because it already has two " + "copies of this signature in it. One of those signature " + "actions must be removed before the package is given to " + "users." + ).format(self.pfmri) class InvalidPropertyValue(ApiException): - """Exception used when a property was set to an invalid value.""" + """Exception used when a property was set to an invalid value.""" - def __init__(self, s): - ApiException.__init__(self) - self.str = s + def __init__(self, s): + ApiException.__init__(self) + self.str = s - def __str__(self): - return self.str + def __str__(self): + return self.str class CertificateError(ApiException): - """Base exception class for all certificate exceptions.""" + """Base exception class for all certificate exceptions.""" - def __init__(self, *args, **kwargs): - ApiException.__init__(self, *args) - if args: - self.data = args[0] - else: - self.data = None - self._args = kwargs + def __init__(self, *args, **kwargs): + ApiException.__init__(self, *args) + if args: + self.data = args[0] + else: + self.data = None + self._args = kwargs - def __str__(self): - return str(self.data) + def __str__(self): + return str(self.data) class ExpiredCertificate(CertificateError): - """Used to indicate that a certificate has expired.""" - - def __init__(self, *args, **kwargs): - CertificateError.__init__(self, *args, **kwargs) - self.publisher = self._args.get("publisher", None) - self.uri = self._args.get("uri", None) - - def __str__(self): - if self.publisher: - if self.uri: - return _("Certificate '{cert}' for publisher " - "'{pub}' needed to access '{uri}', " - "has expired. Please install a valid " - "certificate.").format(cert=self.data, - pub=self.publisher, uri=self.uri) - return _("Certificate '{cert}' for publisher " - "'{pub}', has expired. Please install a valid " - "certificate.").format(cert=self.data, - pub=self.publisher) - if self.uri: - return _("Certificate '{cert}', needed to access " - "'{uri}', has expired. Please install a valid " - "certificate.").format(cert=self.data, - uri=self.uri) - return _("Certificate '{0}' has expired. Please install a " - "valid certificate.").format(self.data) + """Used to indicate that a certificate has expired.""" + + def __init__(self, *args, **kwargs): + CertificateError.__init__(self, *args, **kwargs) + self.publisher = self._args.get("publisher", None) + self.uri = self._args.get("uri", None) + + def __str__(self): + if self.publisher: + if self.uri: + return _( + "Certificate '{cert}' for publisher " + "'{pub}' needed to access '{uri}', " + "has expired. Please install a valid " + "certificate." 
+ ).format(cert=self.data, pub=self.publisher, uri=self.uri) + return _( + "Certificate '{cert}' for publisher " + "'{pub}', has expired. Please install a valid " + "certificate." + ).format(cert=self.data, pub=self.publisher) + if self.uri: + return _( + "Certificate '{cert}', needed to access " + "'{uri}', has expired. Please install a valid " + "certificate." + ).format(cert=self.data, uri=self.uri) + return _( + "Certificate '{0}' has expired. Please install a " + "valid certificate." + ).format(self.data) class ExpiredCertificates(CertificateError): - """Used to collect ExpiredCertficate exceptions.""" - - def __init__(self, errors): - - self.errors = [] - - assert (isinstance(errors, (list, tuple, - set, ExpiredCertificate))) - - if isinstance(errors, ExpiredCertificate): - self.errors.append(errors) - else: - self.errors = errors - - def __str__(self): - pdict = dict() - for e in self.errors: - if e.publisher in pdict: - pdict[e.publisher].append(e.uri) - else: - pdict[e.publisher] = [e.uri] - - msg = "" - for pub, uris in pdict.items(): - msg += "\n{0}:".format(_("Publisher")) - msg += " {0}".format(pub) - for uri in uris: - msg += "\n {0}:\n".format(_("Origin URI")) - msg += " {0}\n".format(uri) - msg += " {0}:\n".format(_("Certificate")) - msg += " {0}\n".format(uri.ssl_cert) - msg += " {0}:\n".format(_("Key")) - msg += " {0}\n".format(uri.ssl_key) - return _("One or more client key and certificate files have " - "expired. Please\nupdate the configuration for the " - "publishers or origins listed below:\n {0}").format(msg) + """Used to collect ExpiredCertficate exceptions.""" + + def __init__(self, errors): + self.errors = [] + + assert isinstance(errors, (list, tuple, set, ExpiredCertificate)) + + if isinstance(errors, ExpiredCertificate): + self.errors.append(errors) + else: + self.errors = errors + + def __str__(self): + pdict = dict() + for e in self.errors: + if e.publisher in pdict: + pdict[e.publisher].append(e.uri) + else: + pdict[e.publisher] = [e.uri] + + msg = "" + for pub, uris in pdict.items(): + msg += "\n{0}:".format(_("Publisher")) + msg += " {0}".format(pub) + for uri in uris: + msg += "\n {0}:\n".format(_("Origin URI")) + msg += " {0}\n".format(uri) + msg += " {0}:\n".format(_("Certificate")) + msg += " {0}\n".format(uri.ssl_cert) + msg += " {0}:\n".format(_("Key")) + msg += " {0}\n".format(uri.ssl_key) + return _( + "One or more client key and certificate files have " + "expired. 
Please\nupdate the configuration for the " + "publishers or origins listed below:\n {0}" + ).format(msg) class ExpiringCertificate(CertificateError): - """Used to indicate that a certificate has expired.""" - - def __str__(self): - publisher = self._args.get("publisher", None) - uri = self._args.get("uri", None) - days = self._args.get("days", 0) - if publisher: - if uri: - return _("Certificate '{cert}' for publisher " - "'{pub}', needed to access '{uri}', " - "will expire in '{days}' days.").format( - cert=self.data, pub=publisher, - uri=uri, days=days) - return _("Certificate '{cert}' for publisher " - "'{pub}' will expire in '{days}' days.").format( - cert=self.data, pub=publisher, days=days) - if uri: - return _("Certificate '{cert}', needed to access " - "'{uri}', will expire in '{days}' days.").format( - cert=self.data, uri=uri, days=days) - return _("Certificate '{cert}' will expire in " - "'{days}' days.").format(cert=self.data, days=days) + """Used to indicate that a certificate has expired.""" + + def __str__(self): + publisher = self._args.get("publisher", None) + uri = self._args.get("uri", None) + days = self._args.get("days", 0) + if publisher: + if uri: + return _( + "Certificate '{cert}' for publisher " + "'{pub}', needed to access '{uri}', " + "will expire in '{days}' days." + ).format(cert=self.data, pub=publisher, uri=uri, days=days) + return _( + "Certificate '{cert}' for publisher " + "'{pub}' will expire in '{days}' days." + ).format(cert=self.data, pub=publisher, days=days) + if uri: + return _( + "Certificate '{cert}', needed to access " + "'{uri}', will expire in '{days}' days." + ).format(cert=self.data, uri=uri, days=days) + return _( + "Certificate '{cert}' will expire in " "'{days}' days." + ).format(cert=self.data, days=days) class InvalidCertificate(CertificateError): - """Used to indicate that a certificate is invalid.""" - - def __str__(self): - publisher = self._args.get("publisher", None) - uri = self._args.get("uri", None) - if publisher: - if uri: - return _("Certificate '{cert}' for publisher " - "'{pub}', needed to access '{uri}', is " - "invalid.").format(cert=self.data, - pub=publisher, uri=uri) - return _("Certificate '{cert}' for publisher " - "'{pub}' is invalid.").format(cert=self.data, - pub=publisher) - if uri: - return _("Certificate '{cert}' needed to access " - "'{uri}' is invalid.").format(cert=self.data, - uri=uri) - return _("Invalid certificate '{0}'.").format(self.data) + """Used to indicate that a certificate is invalid.""" + + def __str__(self): + publisher = self._args.get("publisher", None) + uri = self._args.get("uri", None) + if publisher: + if uri: + return _( + "Certificate '{cert}' for publisher " + "'{pub}', needed to access '{uri}', is " + "invalid." + ).format(cert=self.data, pub=publisher, uri=uri) + return _( + "Certificate '{cert}' for publisher " "'{pub}' is invalid." + ).format(cert=self.data, pub=publisher) + if uri: + return _( + "Certificate '{cert}' needed to access " "'{uri}' is invalid." 
+ ).format(cert=self.data, uri=uri) + return _("Invalid certificate '{0}'.").format(self.data) class NoSuchKey(CertificateError): - """Used to indicate that a key could not be found.""" - - def __str__(self): - publisher = self._args.get("publisher", None) - uri = self._args.get("uri", None) - if publisher: - if uri: - return _("Unable to locate key '{key}' for " - "publisher '{pub}' needed to access " - "'{uri}'.").format(key=self.data, - pub=publisher, uri=uri) - return _("Unable to locate key '{key}' for publisher " - "'{pub}'.").format(key=self.data, pub=publisher - ) - if uri: - return _("Unable to locate key '{key}' needed to " - "access '{uri}'.").format(key=self.data, - uri=uri) - return _("Unable to locate key '{0}'.").format(self.data) + """Used to indicate that a key could not be found.""" + + def __str__(self): + publisher = self._args.get("publisher", None) + uri = self._args.get("uri", None) + if publisher: + if uri: + return _( + "Unable to locate key '{key}' for " + "publisher '{pub}' needed to access " + "'{uri}'." + ).format(key=self.data, pub=publisher, uri=uri) + return _( + "Unable to locate key '{key}' for publisher " "'{pub}'." + ).format(key=self.data, pub=publisher) + if uri: + return _( + "Unable to locate key '{key}' needed to " "access '{uri}'." + ).format(key=self.data, uri=uri) + return _("Unable to locate key '{0}'.").format(self.data) class NoSuchCertificate(CertificateError): - """Used to indicate that a certificate could not be found.""" - - def __str__(self): - publisher = self._args.get("publisher", None) - uri = self._args.get("uri", None) - if publisher: - if uri: - return _("Unable to locate certificate " - "'{cert}' for publisher '{pub}' needed " - "to access '{uri}'.").format( - cert=self.data, pub=publisher, - uri=uri) - return _("Unable to locate certificate '{cert}' for " - "publisher '{pub}'.").format(cert=self.data, - pub=publisher) - if uri: - return _("Unable to locate certificate '{cert}' " - "needed to access '{uri}'.").format( - cert=self.data, uri=uri) - return _("Unable to locate certificate '{0}'.").format( - self.data) + """Used to indicate that a certificate could not be found.""" + + def __str__(self): + publisher = self._args.get("publisher", None) + uri = self._args.get("uri", None) + if publisher: + if uri: + return _( + "Unable to locate certificate " + "'{cert}' for publisher '{pub}' needed " + "to access '{uri}'." + ).format(cert=self.data, pub=publisher, uri=uri) + return _( + "Unable to locate certificate '{cert}' for " + "publisher '{pub}'." + ).format(cert=self.data, pub=publisher) + if uri: + return _( + "Unable to locate certificate '{cert}' " + "needed to access '{uri}'." 
+ ).format(cert=self.data, uri=uri) + return _("Unable to locate certificate '{0}'.").format(self.data) class NotYetValidCertificate(CertificateError): - """Used to indicate that a certificate is not yet valid (future - effective date).""" - - def __str__(self): - publisher = self._args.get("publisher", None) - uri = self._args.get("uri", None) - if publisher: - if uri: - return _("Certificate '{cert}' for publisher " - "'{pub}', needed to access '{uri}', " - "has a future effective date.").format( - cert=self.data, pub=publisher, - uri=uri) - return _("Certificate '{cert}' for publisher " - "'{pub}' has a future effective date.").format( - cert=self.data, pub=publisher) - if uri: - return _("Certificate '{cert}' needed to access " - "'{uri}' has a future effective date.").format( - cert=self.data, uri=uri) - return _("Certificate '{0}' has a future effective date.").format( - self.data) + """Used to indicate that a certificate is not yet valid (future + effective date).""" + + def __str__(self): + publisher = self._args.get("publisher", None) + uri = self._args.get("uri", None) + if publisher: + if uri: + return _( + "Certificate '{cert}' for publisher " + "'{pub}', needed to access '{uri}', " + "has a future effective date." + ).format(cert=self.data, pub=publisher, uri=uri) + return _( + "Certificate '{cert}' for publisher " + "'{pub}' has a future effective date." + ).format(cert=self.data, pub=publisher) + if uri: + return _( + "Certificate '{cert}' needed to access " + "'{uri}' has a future effective date." + ).format(cert=self.data, uri=uri) + return _("Certificate '{0}' has a future effective date.").format( + self.data + ) class ServerReturnError(ApiException): - """This exception is used when the server returns a line which the - client cannot parse correctly.""" + """This exception is used when the server returns a line which the + client cannot parse correctly.""" - def __init__(self, line): - ApiException.__init__(self) - self.line = line + def __init__(self, line): + ApiException.__init__(self) + self.line = line - def __str__(self): - return _("Gave a bad response:{0}").format(self.line) + def __str__(self): + return _("Gave a bad response:{0}").format(self.line) class MissingFileArgumentException(ApiException): - """This exception is used when a file was given as an argument but - no such file could be found.""" - def __init__(self, path): - ApiException.__init__(self) - self.path = path + """This exception is used when a file was given as an argument but + no such file could be found.""" - def __str__(self): - return _("Could not find {0}").format(self.path) + def __init__(self, path): + ApiException.__init__(self) + self.path = path + + def __str__(self): + return _("Could not find {0}").format(self.path) class ManifestError(ApiException): - """Base exception class for all manifest exceptions.""" + """Base exception class for all manifest exceptions.""" - def __init__(self, *args, **kwargs): - ApiException.__init__(self, *args, **kwargs) - if args: - self.data = args[0] - else: - self.data = None - self._args = kwargs + def __init__(self, *args, **kwargs): + ApiException.__init__(self, *args, **kwargs) + if args: + self.data = args[0] + else: + self.data = None + self._args = kwargs - def __str__(self): - return str(self.data) + def __str__(self): + return str(self.data) class BadManifestSignatures(ManifestError): - """Used to indicate that the Manifest signatures are not valid.""" + """Used to indicate that the Manifest signatures are not valid.""" - def __str__(self): - 
if self.data: - return _("The signature data for the manifest of the " - "'{0}' package is not valid.").format(self.data) - return _("The signature data for the manifest is not valid.") + def __str__(self): + if self.data: + return _( + "The signature data for the manifest of the " + "'{0}' package is not valid." + ).format(self.data) + return _("The signature data for the manifest is not valid.") class UnknownErrors(ApiException): - """Used to indicate that one or more exceptions were encountered. - This is intended for use with where multiple exceptions for multiple - files are encountered and the errors have been condensed into a - single exception and re-raised. One example case would be rmtree() - with shutil.Error.""" + """Used to indicate that one or more exceptions were encountered. + This is intended for use with where multiple exceptions for multiple + files are encountered and the errors have been condensed into a + single exception and re-raised. One example case would be rmtree() + with shutil.Error.""" - def __init__(self, msg): - ApiException.__init__(self) - self.__msg = msg + def __init__(self, msg): + ApiException.__init__(self) + self.__msg = msg - def __str__(self): - return self.__msg + def __str__(self): + return self.__msg # Image creation exceptions class ImageCreationException(ApiException): - def __init__(self, path): - ApiException.__init__(self) - self.path = path + def __init__(self, path): + ApiException.__init__(self) + self.path = path - def __str__(self): - raise NotImplementedError() + def __str__(self): + raise NotImplementedError() class ImageAlreadyExists(ImageCreationException): - def __str__(self): - return _("there is already an image at: {0}.\nTo override, use " - "the -f (force) option.").format(self.path) + def __str__(self): + return _( + "there is already an image at: {0}.\nTo override, use " + "the -f (force) option." + ).format(self.path) class ImageCfgEmptyError(ApiException): - """Used to indicate that the image configuration is invalid.""" + """Used to indicate that the image configuration is invalid.""" - def __str__(self): - return _("The configuration data for the image rooted at " - "{0} is empty or missing.").format(self.data) + def __str__(self): + return _( + "The configuration data for the image rooted at " + "{0} is empty or missing." + ).format(self.data) class UnsupportedImageError(ApiException): - """Used to indicate that the image at a specific location is in a format - not supported by this version of the pkg(7) API.""" + """Used to indicate that the image at a specific location is in a format + not supported by this version of the pkg(7) API.""" - def __init__(self, path): - ApiException.__init__(self) - self.path = path + def __init__(self, path): + ApiException.__init__(self) + self.path = path - def __str__(self): - return _("The image rooted at {0} is invalid or is not " - "supported by this version of the packaging system.").format( - self.path) + def __str__(self): + return _( + "The image rooted at {0} is invalid or is not " + "supported by this version of the packaging system." + ).format(self.path) class CreatingImageInNonEmptyDir(ImageCreationException): - def __str__(self): - return _("the specified image path is not empty: {0}.\nTo " - "override, use the -f (force) option.").format(self.path) + def __str__(self): + return _( + "the specified image path is not empty: {0}.\nTo " + "override, use the -f (force) option." 
+ ).format(self.path) def _convert_error(e, ignored_errors=EmptyI): - """Converts the provided exception into an ApiException equivalent if - possible. Returns a new exception object if converted or the original - if not. - - 'ignored_errors' is an optional list of errno values for which None - should be returned. - """ - - if not hasattr(e, "errno"): - return e - if e.errno in ignored_errors: - return None - if e.errno in (errno.EACCES, errno.EPERM): - return PermissionsException(e.filename) - if e.errno == errno.EROFS: - return ReadOnlyFileSystemException(e.filename) + """Converts the provided exception into an ApiException equivalent if + possible. Returns a new exception object if converted or the original + if not. + + 'ignored_errors' is an optional list of errno values for which None + should be returned. + """ + + if not hasattr(e, "errno"): return e + if e.errno in ignored_errors: + return None + if e.errno in (errno.EACCES, errno.EPERM): + return PermissionsException(e.filename) + if e.errno == errno.EROFS: + return ReadOnlyFileSystemException(e.filename) + return e + class LinkedImageException(ApiException): + def __init__( + self, + bundle=None, + lin=None, + exitrv=None, + attach_bad_prop=None, + attach_bad_prop_value=None, + attach_child_notsup=None, + attach_parent_notsup=None, + attach_root_as_child=None, + attach_with_curpath=None, + child_bad_img=None, + child_diverged=None, + child_dup=None, + child_not_in_altroot=None, + child_not_nested=None, + child_op_failed=None, + child_path_notabs=None, + child_unknown=None, + cmd_failed=None, + cmd_output_invalid=None, + detach_child_notsup=None, + detach_from_parent=None, + detach_parent_notsup=None, + img_linked=None, + intermediate_image=None, + lin_malformed=None, + link_to_self=None, + parent_bad_img=None, + parent_bad_notabs=None, + parent_bad_path=None, + parent_nested=None, + parent_not_in_altroot=None, + pkg_op_failed=None, + self_linked=None, + self_not_child=None, + unparsable_output=None, + ): + from pkg.misc import force_str + + self.attach_bad_prop = attach_bad_prop + self.attach_bad_prop_value = attach_bad_prop_value + self.attach_child_notsup = attach_child_notsup + self.attach_parent_notsup = attach_parent_notsup + self.attach_root_as_child = attach_root_as_child + self.attach_with_curpath = attach_with_curpath + self.child_bad_img = child_bad_img + self.child_diverged = child_diverged + self.child_dup = child_dup + self.child_not_in_altroot = child_not_in_altroot + self.child_not_nested = child_not_nested + self.child_op_failed = child_op_failed + self.child_path_notabs = child_path_notabs + self.child_unknown = child_unknown + self.cmd_failed = cmd_failed + self.cmd_output_invalid = cmd_output_invalid + self.detach_child_notsup = detach_child_notsup + self.detach_from_parent = detach_from_parent + self.detach_parent_notsup = detach_parent_notsup + self.img_linked = img_linked + self.intermediate_image = intermediate_image + self.lin_malformed = lin_malformed + self.link_to_self = link_to_self + self.parent_bad_img = parent_bad_img + self.parent_bad_notabs = parent_bad_notabs + self.parent_bad_path = parent_bad_path + self.parent_nested = parent_nested + self.parent_not_in_altroot = parent_not_in_altroot + self.pkg_op_failed = pkg_op_failed + self.self_linked = self_linked + self.self_not_child = self_not_child + self.unparsable_output = unparsable_output + + # first deal with an error bundle + if bundle: + assert type(bundle) in [tuple, list, set] + for e in bundle: + assert isinstance(e, LinkedImageException) 
+ + # set default error return value + if exitrv == None: + exitrv = pkgdefs.EXIT_OOPS + + self.lix_err = None + self.lix_bundle = bundle + self.lix_exitrv = exitrv + return + + err = None + + if attach_bad_prop is not None: + err = _("Invalid linked image attach property: {0}").format( + attach_bad_prop + ) + + if attach_bad_prop_value is not None: + assert type(attach_bad_prop_value) in [tuple, list] + assert len(attach_bad_prop_value) == 2 + err = _( + "Invalid linked image attach property " "value: {0}" + ).format("=".join(attach_bad_prop_value)) + + if attach_child_notsup is not None: + err = _( + "Linked image type does not support child " "attach: {0}" + ).format(attach_child_notsup) + + if attach_parent_notsup is not None: + err = _( + "Linked image type does not support parent " "attach: {0}" + ).format(attach_parent_notsup) + + if attach_root_as_child is not None: + err = _( + "Cannot attach root image as child: {0}".format( + attach_root_as_child + ) + ) + + if attach_with_curpath is not None: + path, curpath = attach_with_curpath + err = _( + "Cannot link images when an image is not at " + "its default location. The image currently " + "located at:\n {curpath}\n" + "is normally located at:\n {path}\n" + ).format( + path=path, + curpath=curpath, + ) + + if child_bad_img is not None: + if exitrv == None: + exitrv = pkgdefs.EXIT_EACCESS + if lin: + err = _( + "Can't initialize child image " "({lin}) at path: {path}" + ).format(lin=lin, path=child_bad_img) + else: + err = _("Can't initialize child image " "at path: {0}").format( + child_bad_img + ) - def __init__(self, bundle=None, lin=None, exitrv=None, - attach_bad_prop=None, - attach_bad_prop_value=None, - attach_child_notsup=None, - attach_parent_notsup=None, - attach_root_as_child=None, - attach_with_curpath=None, - child_bad_img=None, - child_diverged=None, - child_dup=None, - child_not_in_altroot=None, - child_not_nested=None, - child_op_failed=None, - child_path_notabs=None, - child_unknown=None, - cmd_failed=None, - cmd_output_invalid=None, - detach_child_notsup=None, - detach_from_parent=None, - detach_parent_notsup=None, - img_linked=None, - intermediate_image=None, - lin_malformed=None, - link_to_self=None, - parent_bad_img=None, - parent_bad_notabs=None, - parent_bad_path=None, - parent_nested=None, - parent_not_in_altroot=None, - pkg_op_failed=None, - self_linked=None, - self_not_child=None, - unparsable_output=None): - - from pkg.misc import force_str - - self.attach_bad_prop = attach_bad_prop - self.attach_bad_prop_value = attach_bad_prop_value - self.attach_child_notsup = attach_child_notsup - self.attach_parent_notsup = attach_parent_notsup - self.attach_root_as_child = attach_root_as_child - self.attach_with_curpath = attach_with_curpath - self.child_bad_img = child_bad_img - self.child_diverged = child_diverged - self.child_dup = child_dup - self.child_not_in_altroot = child_not_in_altroot - self.child_not_nested = child_not_nested - self.child_op_failed = child_op_failed - self.child_path_notabs = child_path_notabs - self.child_unknown = child_unknown - self.cmd_failed = cmd_failed - self.cmd_output_invalid = cmd_output_invalid - self.detach_child_notsup = detach_child_notsup - self.detach_from_parent = detach_from_parent - self.detach_parent_notsup = detach_parent_notsup - self.img_linked = img_linked - self.intermediate_image = intermediate_image - self.lin_malformed = lin_malformed - self.link_to_self = link_to_self - self.parent_bad_img = parent_bad_img - self.parent_bad_notabs = parent_bad_notabs - 
self.parent_bad_path = parent_bad_path - self.parent_nested = parent_nested - self.parent_not_in_altroot = parent_not_in_altroot - self.pkg_op_failed = pkg_op_failed - self.self_linked = self_linked - self.self_not_child = self_not_child - self.unparsable_output = unparsable_output - - # first deal with an error bundle - if bundle: - assert type(bundle) in [tuple, list, set] - for e in bundle: - assert isinstance(e, LinkedImageException) - - # set default error return value - if exitrv == None: - exitrv = pkgdefs.EXIT_OOPS - - self.lix_err = None - self.lix_bundle = bundle - self.lix_exitrv = exitrv - return - - err = None - - if attach_bad_prop is not None: - err = _("Invalid linked image attach property: {0}").format( - attach_bad_prop) - - if attach_bad_prop_value is not None: - assert type(attach_bad_prop_value) in [tuple, list] - assert len(attach_bad_prop_value) == 2 - err = _("Invalid linked image attach property " - "value: {0}").format( - "=".join(attach_bad_prop_value)) - - if attach_child_notsup is not None: - err = _("Linked image type does not support child " - "attach: {0}").format(attach_child_notsup) - - if attach_parent_notsup is not None: - err = _("Linked image type does not support parent " - "attach: {0}").format(attach_parent_notsup) - - if attach_root_as_child is not None: - err = _("Cannot attach root image as child: {0}".format( - attach_root_as_child)) - - if attach_with_curpath is not None: - path, curpath = attach_with_curpath - err = _("Cannot link images when an image is not at " - "its default location. The image currently " - "located at:\n {curpath}\n" - "is normally located at:\n {path}\n").format( - path=path, - curpath=curpath, - ) - - if child_bad_img is not None: - if exitrv == None: - exitrv = pkgdefs.EXIT_EACCESS - if lin: - err = _("Can't initialize child image " - "({lin}) at path: {path}").format( - lin=lin, - path=child_bad_img - ) - else: - err = _("Can't initialize child image " - "at path: {0}").format(child_bad_img) - - if child_diverged is not None: - if exitrv == None: - exitrv = pkgdefs.EXIT_DIVERGED - err = _("Linked image is diverged: {0}").format( - child_diverged) - - if child_dup is not None: - err = _("A linked child image with this name " - "already exists: {0}").format(child_dup) - - if child_not_in_altroot is not None: - path, altroot = child_not_in_altroot - err = _("Child image '{path}' is not located " - "within the parent's altroot '{altroot}'").format( - path=path, - altroot=altroot - ) - - if child_not_nested is not None: - cpath, ppath = child_not_nested - err = _("Child image '{cpath}' is not nested " - "within the parent image '{ppath}'").format( - cpath=cpath, - ppath=ppath, - ) - - if child_op_failed is not None: - op, cpath, e = child_op_failed - if exitrv == None: - exitrv = pkgdefs.EXIT_EACCESS - if lin: - err = _("Failed '{op}' for child image " - "({lin}) at path: {path}: " - "{strerror}").format( - op=op, - lin=lin, - path=cpath, - strerror=e, - ) - else: - err = _("Failed '{op}' for child image " - "at path: {path}: {strerror}").format( - op=op, - path=cpath, - strerror=e, - ) - - if child_path_notabs is not None: - err = _("Child path not absolute: {0}").format( - child_path_notabs) - - if child_unknown is not None: - err = _("Unknown child linked image: {0}").format( - child_unknown) - - if cmd_failed is not None: - (rv, cmd, errout) = cmd_failed - err = _("The following subprocess returned an " - "unexpected exit code of {rv:d}:\n {cmd}").format( - rv=rv, cmd=cmd) - if errout: - err += _("\nAnd generated the 
following error " - "message:\n{errout}".format( - errout=force_str(errout))) - - if cmd_output_invalid is not None: - (cmd, output) = cmd_output_invalid - err = _( - "The following subprocess:\n" - " {cmd}\n" - "Generated the following unexpected output:\n" - "{output}\n".format( - cmd=" ".join(cmd), output="\n".join(output))) - - if detach_child_notsup is not None: - err = _("Linked image type does not support " - "child detach: {0}").format(detach_child_notsup) - - if detach_from_parent is not None: - if exitrv == None: - exitrv = pkgdefs.EXIT_PARENTOP - err = _("Parent linked to child, can not detach " - "child: {0}").format(detach_from_parent) - - if detach_parent_notsup is not None: - err = _("Linked image type does not support " - "parent detach: {0}").format(detach_parent_notsup) - - if img_linked is not None: - err = _("Image already a linked child: {0}").format( - img_linked) - - if intermediate_image is not None: - ppath, cpath, ipath = intermediate_image - err = _( - "Intermediate image '{ipath}' found between " - "child '{cpath}' and " - "parent '{ppath}'").format( - ppath=ppath, - cpath=cpath, - ipath=ipath, - ) - - if lin_malformed is not None: - err = _("Invalid linked image name '{0}'. " - "Linked image names have the following format " - "':'").format( - lin_malformed) - - if link_to_self is not None: - err = _("Can't link image to itself: {0}") - - if parent_bad_img is not None: - if exitrv == None: - exitrv = pkgdefs.EXIT_EACCESS - err = _("Can't initialize parent image at path: {0}").format( - parent_bad_img) - - if parent_bad_notabs is not None: - err = _("Parent path not absolute: {0}").format( - parent_bad_notabs) - - if parent_bad_path is not None: - if exitrv == None: - exitrv = pkgdefs.EXIT_EACCESS - err = _("Can't access parent image at path: {0}").format( - parent_bad_path) - - if parent_nested is not None: - ppath, cpath = parent_nested - err = _("A parent image '{ppath}' can not be nested " - "within a child image '{cpath}'").format( - ppath=ppath, - cpath=cpath, - ) - - if parent_not_in_altroot is not None: - path, altroot = parent_not_in_altroot - err = _("Parent image '{path}' is not located " - "within the child's altroot '{altroot}'").format( - path=path, - altroot=altroot - ) - - if pkg_op_failed is not None: - assert lin - (op, exitrv, errout, e) = pkg_op_failed - assert op is not None - - if e is None: - err = _(""" + if child_diverged is not None: + if exitrv == None: + exitrv = pkgdefs.EXIT_DIVERGED + err = _("Linked image is diverged: {0}").format(child_diverged) + + if child_dup is not None: + err = _( + "A linked child image with this name " "already exists: {0}" + ).format(child_dup) + + if child_not_in_altroot is not None: + path, altroot = child_not_in_altroot + err = _( + "Child image '{path}' is not located " + "within the parent's altroot '{altroot}'" + ).format(path=path, altroot=altroot) + + if child_not_nested is not None: + cpath, ppath = child_not_nested + err = _( + "Child image '{cpath}' is not nested " + "within the parent image '{ppath}'" + ).format( + cpath=cpath, + ppath=ppath, + ) + + if child_op_failed is not None: + op, cpath, e = child_op_failed + if exitrv == None: + exitrv = pkgdefs.EXIT_EACCESS + if lin: + err = _( + "Failed '{op}' for child image " + "({lin}) at path: {path}: " + "{strerror}" + ).format( + op=op, + lin=lin, + path=cpath, + strerror=e, + ) + else: + err = _( + "Failed '{op}' for child image " + "at path: {path}: {strerror}" + ).format( + op=op, + path=cpath, + strerror=e, + ) + + if child_path_notabs is 
not None: + err = _("Child path not absolute: {0}").format(child_path_notabs) + + if child_unknown is not None: + err = _("Unknown child linked image: {0}").format(child_unknown) + + if cmd_failed is not None: + (rv, cmd, errout) = cmd_failed + err = _( + "The following subprocess returned an " + "unexpected exit code of {rv:d}:\n {cmd}" + ).format(rv=rv, cmd=cmd) + if errout: + err += _( + "\nAnd generated the following error " + "message:\n{errout}".format(errout=force_str(errout)) + ) + + if cmd_output_invalid is not None: + (cmd, output) = cmd_output_invalid + err = _( + "The following subprocess:\n" + " {cmd}\n" + "Generated the following unexpected output:\n" + "{output}\n".format(cmd=" ".join(cmd), output="\n".join(output)) + ) + + if detach_child_notsup is not None: + err = _( + "Linked image type does not support " "child detach: {0}" + ).format(detach_child_notsup) + + if detach_from_parent is not None: + if exitrv == None: + exitrv = pkgdefs.EXIT_PARENTOP + err = _( + "Parent linked to child, can not detach " "child: {0}" + ).format(detach_from_parent) + + if detach_parent_notsup is not None: + err = _( + "Linked image type does not support " "parent detach: {0}" + ).format(detach_parent_notsup) + + if img_linked is not None: + err = _("Image already a linked child: {0}").format(img_linked) + + if intermediate_image is not None: + ppath, cpath, ipath = intermediate_image + err = _( + "Intermediate image '{ipath}' found between " + "child '{cpath}' and " + "parent '{ppath}'" + ).format( + ppath=ppath, + cpath=cpath, + ipath=ipath, + ) + + if lin_malformed is not None: + err = _( + "Invalid linked image name '{0}'. " + "Linked image names have the following format " + "':'" + ).format(lin_malformed) + + if link_to_self is not None: + err = _("Can't link image to itself: {0}") + + if parent_bad_img is not None: + if exitrv == None: + exitrv = pkgdefs.EXIT_EACCESS + err = _("Can't initialize parent image at path: {0}").format( + parent_bad_img + ) + + if parent_bad_notabs is not None: + err = _("Parent path not absolute: {0}").format(parent_bad_notabs) + + if parent_bad_path is not None: + if exitrv == None: + exitrv = pkgdefs.EXIT_EACCESS + err = _("Can't access parent image at path: {0}").format( + parent_bad_path + ) + + if parent_nested is not None: + ppath, cpath = parent_nested + err = _( + "A parent image '{ppath}' can not be nested " + "within a child image '{cpath}'" + ).format( + ppath=ppath, + cpath=cpath, + ) + + if parent_not_in_altroot is not None: + path, altroot = parent_not_in_altroot + err = _( + "Parent image '{path}' is not located " + "within the child's altroot '{altroot}'" + ).format(path=path, altroot=altroot) + + if pkg_op_failed is not None: + assert lin + (op, exitrv, errout, e) = pkg_op_failed + assert op is not None + + if e is None: + err = _( + """ A '{op}' operation failed for child '{lin}' with an unexpected return value of {exitrv:d} and generated the following output: {errout} """ - ).format( - lin=lin, - op=op, - exitrv=exitrv, - errout=force_str(errout), - ) - else: - err = _(""" + ).format( + lin=lin, + op=op, + exitrv=exitrv, + errout=force_str(errout), + ) + else: + err = _( + """ A '{op}' operation failed for child '{lin}' with an unexpected exception: {e} @@ -3258,26 +3650,29 @@ def __init__(self, bundle=None, lin=None, exitrv=None, {errout} """ - ).format( - lin=lin, - op=op, - errout=force_str(errout), - e=e, - ) - - if self_linked is not None: - err = _("Current image already a linked child: {0}").format( - self_linked) - - if 
self_not_child is not None: - if exitrv == None: - exitrv = pkgdefs.EXIT_NOPARENT - err = _("Current image is not a linked child: {0}").format( - self_not_child) - - if unparsable_output is not None: - (op, errout, e) = unparsable_output - err = _(""" + ).format( + lin=lin, + op=op, + errout=force_str(errout), + e=e, + ) + + if self_linked is not None: + err = _("Current image already a linked child: {0}").format( + self_linked + ) + + if self_not_child is not None: + if exitrv == None: + exitrv = pkgdefs.EXIT_NOPARENT + err = _("Current image is not a linked child: {0}").format( + self_not_child + ) + + if unparsable_output is not None: + (op, errout, e) = unparsable_output + err = _( + """ A '{op}' operation for child '{lin}' generated non-json output. The json parser failed with the following error: {e} @@ -3286,279 +3681,318 @@ def __init__(self, bundle=None, lin=None, exitrv=None, {errout} """ - ).format( - lin=lin, - op=op, - e=e, - errout=force_str(errout), - ) - - # set default error return value - if exitrv == None: - exitrv = pkgdefs.EXIT_OOPS - - self.lix_err = err - self.lix_bundle = None - self.lix_exitrv = exitrv - - def __str__(self): - assert self.lix_err or self.lix_bundle - assert not (self.lix_err and self.lix_bundle), \ - "self.lix_err = {0}, self.lix_bundle = {1}".format( - str(self.lix_err), str(self.lix_bundle)) - - # single error - if self.lix_err: - return self.lix_err - - # concatenate multiple errors - bundle_str = [] - for e in self.lix_bundle: - bundle_str.append(str(e)) - return "\n".join(bundle_str) + ).format( + lin=lin, + op=op, + e=e, + errout=force_str(errout), + ) + + # set default error return value + if exitrv == None: + exitrv = pkgdefs.EXIT_OOPS + + self.lix_err = err + self.lix_bundle = None + self.lix_exitrv = exitrv + + def __str__(self): + assert self.lix_err or self.lix_bundle + assert not ( + self.lix_err and self.lix_bundle + ), "self.lix_err = {0}, self.lix_bundle = {1}".format( + str(self.lix_err), str(self.lix_bundle) + ) + + # single error + if self.lix_err: + return self.lix_err + + # concatenate multiple errors + bundle_str = [] + for e in self.lix_bundle: + bundle_str.append(str(e)) + return "\n".join(bundle_str) class FreezePkgsException(ApiException): - """Used if an argument to pkg freeze isn't valid.""" - - def __init__(self, multiversions=None, unmatched_wildcards=None, - version_mismatch=None, versionless_uninstalled=None): - ApiException.__init__(self) - self.multiversions = multiversions - self.unmatched_wildcards = unmatched_wildcards - self.version_mismatch = version_mismatch - self.versionless_uninstalled = versionless_uninstalled - - def __str__(self): - res = [] - if self.multiversions: - s = _("""\ + """Used if an argument to pkg freeze isn't valid.""" + + def __init__( + self, + multiversions=None, + unmatched_wildcards=None, + version_mismatch=None, + versionless_uninstalled=None, + ): + ApiException.__init__(self) + self.multiversions = multiversions + self.unmatched_wildcards = unmatched_wildcards + self.version_mismatch = version_mismatch + self.versionless_uninstalled = versionless_uninstalled + + def __str__(self): + res = [] + if self.multiversions: + s = _( + """\ The following packages were frozen at two different versions by the patterns provided. 
The package stem and the versions it was frozen at are -provided:""") - res += [s] - res += ["\t{0}\t{1}".format(stem, " ".join([ - str(v) for v in sorted(versions)])) - for stem, versions in sorted(self.multiversions)] - - if self.unmatched_wildcards: - s = _("""\ -The following patterns contained wildcards but matched no -installed packages.""") - res += [s] - res += ["\t{0}".format(pat) for pat in sorted( - self.unmatched_wildcards)] +provided:""" + ) + res += [s] + res += [ + "\t{0}\t{1}".format( + stem, " ".join([str(v) for v in sorted(versions)]) + ) + for stem, versions in sorted(self.multiversions) + ] - if self.version_mismatch: - s = _("""\ + if self.unmatched_wildcards: + s = _( + """\ +The following patterns contained wildcards but matched no +installed packages.""" + ) + res += [s] + res += [ + "\t{0}".format(pat) for pat in sorted(self.unmatched_wildcards) + ] + + if self.version_mismatch: + s = _( + """\ The following patterns attempted to freeze the listed packages -at a version different from the version at which the packages are installed.""") - res += [s] - for pat in sorted(self.version_mismatch): - res += ["\t{0}".format(pat)] - if len(self.version_mismatch[pat]) > 1: - res += [ - "\t\t{0}".format(stem) - for stem - in sorted(self.version_mismatch[pat]) - ] - - if self.versionless_uninstalled: - s = _("""\ +at a version different from the version at which the packages are installed.""" + ) + res += [s] + for pat in sorted(self.version_mismatch): + res += ["\t{0}".format(pat)] + if len(self.version_mismatch[pat]) > 1: + res += [ + "\t\t{0}".format(stem) + for stem in sorted(self.version_mismatch[pat]) + ] + + if self.versionless_uninstalled: + s = _( + """\ The following patterns don't match installed packages and contain no version information. 
Uninstalled packages can only be frozen by -providing a version at which to freeze them.""") - res += [s] - res += ["\t{0}".format(p) for p in sorted( - self.versionless_uninstalled)] - return "\n".join(res) +providing a version at which to freeze them.""" + ) + res += [s] + res += [ + "\t{0}".format(p) for p in sorted(self.versionless_uninstalled) + ] + return "\n".join(res) + class InvalidFreezeFile(ApiException): - """Used to indicate the freeze state file could not be loaded.""" + """Used to indicate the freeze state file could not be loaded.""" + + def __str__(self): + return _("The freeze state file '{0}' is invalid.").format(self.data) - def __str__(self): - return _("The freeze state file '{0}' is invalid.").format( - self.data) class UnknownFreezeFileVersion(ApiException): - """Used when the version on the freeze state file isn't the version - that's expected.""" - - def __init__(self, found_ver, expected_ver, location): - self.found = found_ver - self.expected = expected_ver - self.loc = location - - def __str__(self): - return _("The freeze state file '{loc}' was expected to have " - "a version of {exp}, but its version was {found}").format( - exp=self.expected, - found=self.found, - loc=self.loc, - ) + """Used when the version on the freeze state file isn't the version + that's expected.""" -class InvalidOptionError(ApiException): - """Used to indicate an issue with verifying options passed to a certain - operation.""" - - GENERIC = "generic" # generic option violation - OPT_REPEAT = "opt_repeat" # option repetition is not allowed - ARG_REPEAT = "arg_repeat" # argument repetition is not allowed - ARG_INVALID = "arg_invalid" # argument is invalid - INCOMPAT = "incompat" # option 'a' can not be specified with option 'b' - REQUIRED = "required" # option 'a' requires option 'b' - REQUIRED_ANY = "required_any" # option 'a' requires option 'b', 'c' or more - XOR = "xor" # either option 'a' or option 'b' must be specified - - def __init__(self, err_type=GENERIC, options=[], msg=None, - valid_args=[]): - - self.err_type = err_type - self.options = options - self.msg = msg - self.valid_args = valid_args - - def __str__(self): - - # In case the user provided a custom message we just take it and - # append the according options. 
- if self.msg is not None: - if self.options: - self.msg += ": " - self.msg += " ".join(self.options) - return self.msg - - if self.err_type == self.OPT_REPEAT: - assert len(self.options) == 1 - return _("Option '{option}' may not be repeated.").format( - option=self.options[0]) - elif self.err_type == self.ARG_REPEAT: - assert len(self.options) == 2 - return _("Argument '{op1}' for option '{op2}' may " - "not be repeated.").format(op1=self.options[0], - op2=self.options[1]) - elif self.err_type == self.ARG_INVALID: - assert len(self.options) == 2 - s = _("Argument '{op1}' for option '{op2}' is " - "invalid.").format(op1=self.options[0], - op2=self.options[1]) - if self.valid_args: - s += _("\nSupported: {0}").format(", ".join( - [str(va) for va in self.valid_args])) - return s - elif self.err_type == self.INCOMPAT: - assert len(self.options) == 2 - return _("The '{op1}' and '{op2}' options may " - "not be combined.").format(op1=self.options[0], - op2=self.options[1]) - elif self.err_type == self.REQUIRED: - assert len(self.options) == 2 - return _("'{op1}' may only be used with " - "'{op2}'.").format(op1=self.options[0], - op2=self.options[1]) - elif self.err_type == self.REQUIRED_ANY: - assert len(self.options) > 2 - return _("'{op1}' may only be used with " - "'{op2}' or {op3}.").format(op1=self.options[0], - op2=", ".join(self.options[1:-1]), - op3=self.options[-1]) - elif self.err_type == self.XOR: - assert len(self.options) == 2 - return _("Either '{op1}' or '{op2}' must be " - "specified").format(op1=self.options[0], - op2=self.options[1]) - else: - return _("invalid option(s): ") + " ".join(self.options) + def __init__(self, found_ver, expected_ver, location): + self.found = found_ver + self.expected = expected_ver + self.loc = location -class InvalidOptionErrors(ApiException): + def __str__(self): + return _( + "The freeze state file '{loc}' was expected to have " + "a version of {exp}, but its version was {found}" + ).format( + exp=self.expected, + found=self.found, + loc=self.loc, + ) - def __init__(self, errors): - self.errors = [] +class InvalidOptionError(ApiException): + """Used to indicate an issue with verifying options passed to a certain + operation.""" + + GENERIC = "generic" # generic option violation + OPT_REPEAT = "opt_repeat" # option repetition is not allowed + ARG_REPEAT = "arg_repeat" # argument repetition is not allowed + ARG_INVALID = "arg_invalid" # argument is invalid + INCOMPAT = "incompat" # option 'a' can not be specified with option 'b' + REQUIRED = "required" # option 'a' requires option 'b' + REQUIRED_ANY = "required_any" # option 'a' requires option 'b', 'c' or more + XOR = "xor" # either option 'a' or option 'b' must be specified + + def __init__(self, err_type=GENERIC, options=[], msg=None, valid_args=[]): + self.err_type = err_type + self.options = options + self.msg = msg + self.valid_args = valid_args + + def __str__(self): + # In case the user provided a custom message we just take it and + # append the according options. + if self.msg is not None: + if self.options: + self.msg += ": " + self.msg += " ".join(self.options) + return self.msg + + if self.err_type == self.OPT_REPEAT: + assert len(self.options) == 1 + return _("Option '{option}' may not be repeated.").format( + option=self.options[0] + ) + elif self.err_type == self.ARG_REPEAT: + assert len(self.options) == 2 + return _( + "Argument '{op1}' for option '{op2}' may " "not be repeated." 
+ ).format(op1=self.options[0], op2=self.options[1]) + elif self.err_type == self.ARG_INVALID: + assert len(self.options) == 2 + s = _("Argument '{op1}' for option '{op2}' is " "invalid.").format( + op1=self.options[0], op2=self.options[1] + ) + if self.valid_args: + s += _("\nSupported: {0}").format( + ", ".join([str(va) for va in self.valid_args]) + ) + return s + elif self.err_type == self.INCOMPAT: + assert len(self.options) == 2 + return _( + "The '{op1}' and '{op2}' options may " "not be combined." + ).format(op1=self.options[0], op2=self.options[1]) + elif self.err_type == self.REQUIRED: + assert len(self.options) == 2 + return _("'{op1}' may only be used with " "'{op2}'.").format( + op1=self.options[0], op2=self.options[1] + ) + elif self.err_type == self.REQUIRED_ANY: + assert len(self.options) > 2 + return _( + "'{op1}' may only be used with " "'{op2}' or {op3}." + ).format( + op1=self.options[0], + op2=", ".join(self.options[1:-1]), + op3=self.options[-1], + ) + elif self.err_type == self.XOR: + assert len(self.options) == 2 + return _("Either '{op1}' or '{op2}' must be " "specified").format( + op1=self.options[0], op2=self.options[1] + ) + else: + return _("invalid option(s): ") + " ".join(self.options) - assert (isinstance(errors, list) or isinstance(errors, tuple) or - isinstance(errors, set) or - isinstance(errors, InvalidOptionError)) - if isinstance(errors, InvalidOptionError): - self.errors.append(errors) - else: - self.errors = errors +class InvalidOptionErrors(ApiException): + def __init__(self, errors): + self.errors = [] - def __str__(self): - msgs = [] - for e in self.errors: - msgs.append(str(e)) - return "\n".join(msgs) + assert ( + isinstance(errors, list) + or isinstance(errors, tuple) + or isinstance(errors, set) + or isinstance(errors, InvalidOptionError) + ) -class UnexpectedLinkError(ApiException): - """Used to indicate that an image state file has been replaced - with a symlink.""" + if isinstance(errors, InvalidOptionError): + self.errors.append(errors) + else: + self.errors = errors - def __init__(self, path, filename, errno): - self.path = path - self.filename = filename - self.errno = errno + def __str__(self): + msgs = [] + for e in self.errors: + msgs.append(str(e)) + return "\n".join(msgs) - def __str__(self): - return _("Cannot update file: '{file}' at path " - "'{path}', contains a symlink. " - "[Error '{errno:d}': '{error}']").format( - error=os.strerror(self.errno), - errno=self.errno, - path=self.path, - file=self.filename, - ) + +class UnexpectedLinkError(ApiException): + """Used to indicate that an image state file has been replaced + with a symlink.""" + + def __init__(self, path, filename, errno): + self.path = path + self.filename = filename + self.errno = errno + + def __str__(self): + return _( + "Cannot update file: '{file}' at path " + "'{path}', contains a symlink. 
" + "[Error '{errno:d}': '{error}']" + ).format( + error=os.strerror(self.errno), + errno=self.errno, + path=self.path, + file=self.filename, + ) class InvalidConfigFile(ApiException): - """Used to indicate that a configuration file is invalid - or broken""" + """Used to indicate that a configuration file is invalid + or broken""" - def __init__(self, path): - self.path = path + def __init__(self, path): + self.path = path - def __str__(self): - return _("Cannot parse configuration file " - "{path}'.").format(path=self.path) + def __str__(self): + return _("Cannot parse configuration file " "{path}'.").format( + path=self.path + ) class PkgUnicodeDecodeError(UnicodeDecodeError): - def __init__(self, obj, *args): - self.obj = obj - UnicodeDecodeError.__init__(self, *args) + def __init__(self, obj, *args): + self.obj = obj + UnicodeDecodeError.__init__(self, *args) + + def __str__(self): + s = UnicodeDecodeError.__str__(self) + return "{0}. You passed in {1!r} {2}".format( + s, self.obj, type(self.obj) + ) - def __str__(self): - s = UnicodeDecodeError.__str__(self) - return "{0}. You passed in {1!r} {2}".format(s, self.obj, - type(self.obj)) class UnsupportedVariantGlobbing(ApiException): - """Used to indicate that globbing for variant is not supported.""" + """Used to indicate that globbing for variant is not supported.""" + + def __str__(self): + return _("Globbing is not supported for variants.") - def __str__(self): - return _("Globbing is not supported for variants.") class UnsupportedFacetChange(ApiException): - """Used to indicate an unsupported facet change.""" + """Used to indicate an unsupported facet change.""" - def __init__(self, facet, value=None): - self.facet = facet - self.value = value + def __init__(self, facet, value=None): + self.facet = facet + self.value = value + + def __str__(self): + return _( + "Changing '{facet}' to '{value}' is not supported.".format( + facet=self.facet, value=self.value + ) + ) - def __str__(self): - return _("Changing '{facet}' to '{value}' is not supported.". - format(facet=self.facet, value=self.value)) class InvalidMediatorTarget(ApiException): - """ Used to indicate if the target of a mediated link is missing, - which could lead to a broken system on a reboot.""" + """Used to indicate if the target of a mediated link is missing, + which could lead to a broken system on a reboot.""" + + def __init__(self, medmsg): + self.medmsg = medmsg - def __init__(self, medmsg): - self.medmsg = medmsg + def __str__(self): + return self.medmsg - def __str__(self): - return self.medmsg # Vim hints # vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/bootenv.py b/src/modules/client/bootenv.py index b2e452edf..6bf15faf2 100644 --- a/src/modules/client/bootenv.py +++ b/src/modules/client/bootenv.py @@ -29,6 +29,7 @@ import tempfile from pkg.client import global_settings + logger = global_settings.logger import pkg.client.api_errors as api_errors @@ -39,833 +40,875 @@ # Since pkg(1) may be installed without libbe installed # check for libbe and import it if it exists. try: - import libbe_py as be + import libbe_py as be except ImportError: - # All recovery actions are disabled when libbe can't be - # imported. - pass + # All recovery actions are disabled when libbe can't be + # imported. + pass -class BootEnv(object): - """A BootEnv object is an object containing the logic for managing the - recovery of image-modifying operations such as install, uninstall, and - update. - - Recovery is only enabled for ZFS filesystems. 
Any operation attempted on - UFS will not be handled by BootEnv. - - This class makes use of usr/lib/python*/vendor-packages/libbe.py as the - python wrapper for interfacing with libbe. Both libraries are delivered - by the install/beadm package. This package is not required for pkg(1) - to operate successfully. It is soft required, meaning if it exists the - bootenv class will attempt to provide recovery support.""" - - def __init__(self, img, progress_tracker=None): - self.be_name = None - self.dataset = None - self.be_name_clone = None - self.be_name_clone_uuid = None - self.clone_dir = None - self.img = img - self.is_live_BE = False - self.is_valid = False - self.snapshot_name = None - self.progress_tracker = progress_tracker - - # record current location of image root so we can remember - # original source BE if we clone existing image - self.root = self.img.get_root() - rc = 0 - - assert self.root != None - - # Need to find the name of the BE we're operating on in order - # to create a snapshot and/or a clone of the BE. - self.beList = self.get_be_list(raise_error=True) - - for i, beVals in enumerate(self.beList): - # pkg(1) expects a directory as the target of an - # operation. BootEnv needs to determine if this target - # directory maps to a BE. If a bogus directory is - # provided to pkg(1) via -R, then pkg(1) just updates - # '/' which also causes BootEnv to manage '/' as well. - # This should be fixed before this class is ever - # instantiated. - - be_name = beVals.get("orig_be_name") - - # If we're not looking at a boot env entry or an - # entry that is not mounted then continue. - if not be_name or not beVals.get("mounted"): - continue - - # Check if we're operating on the live BE. - # If so it must also be active. If we are not - # operating on the live BE, then verify - # that the mountpoint of the BE matches - # the -R argument passed in by the user. - if self.root == '/': - if not beVals.get("active"): - continue - else: - self.is_live_BE = True - else: - if beVals.get("mountpoint") != self.root: - continue - - # Set the needed BE components so snapshots - # and clones can be managed. - self.be_name = be_name - - self.dataset = beVals.get("dataset") - - # Let libbe provide the snapshot name - err, snapshot_name = be.beCreateSnapshot(self.be_name) - self.clone_dir = tempfile.mkdtemp() - - # Check first field for failure. - # 2nd field is the returned snapshot name - if err == 0: - self.snapshot_name = snapshot_name - # we require BootEnv to be initialised within - # the context of a history operation, i.e. - # after img.history.operation_name has been set. - img.history.operation_snapshot = snapshot_name - else: - logger.error(_("pkg: unable to create an auto " - "snapshot. pkg recovery is disabled.")) - raise RuntimeError("recoveryDisabled") - self.is_valid = True - break +class BootEnv(object): + """A BootEnv object is an object containing the logic for managing the + recovery of image-modifying operations such as install, uninstall, and + update. + + Recovery is only enabled for ZFS filesystems. Any operation attempted on + UFS will not be handled by BootEnv. + + This class makes use of usr/lib/python*/vendor-packages/libbe.py as the + python wrapper for interfacing with libbe. Both libraries are delivered + by the install/beadm package. This package is not required for pkg(1) + to operate successfully. 
It is soft required, meaning if it exists the + bootenv class will attempt to provide recovery support.""" + + def __init__(self, img, progress_tracker=None): + self.be_name = None + self.dataset = None + self.be_name_clone = None + self.be_name_clone_uuid = None + self.clone_dir = None + self.img = img + self.is_live_BE = False + self.is_valid = False + self.snapshot_name = None + self.progress_tracker = progress_tracker + + # record current location of image root so we can remember + # original source BE if we clone existing image + self.root = self.img.get_root() + rc = 0 + + assert self.root != None + + # Need to find the name of the BE we're operating on in order + # to create a snapshot and/or a clone of the BE. + self.beList = self.get_be_list(raise_error=True) + + for i, beVals in enumerate(self.beList): + # pkg(1) expects a directory as the target of an + # operation. BootEnv needs to determine if this target + # directory maps to a BE. If a bogus directory is + # provided to pkg(1) via -R, then pkg(1) just updates + # '/' which also causes BootEnv to manage '/' as well. + # This should be fixed before this class is ever + # instantiated. + + be_name = beVals.get("orig_be_name") + + # If we're not looking at a boot env entry or an + # entry that is not mounted then continue. + if not be_name or not beVals.get("mounted"): + continue + + # Check if we're operating on the live BE. + # If so it must also be active. If we are not + # operating on the live BE, then verify + # that the mountpoint of the BE matches + # the -R argument passed in by the user. + if self.root == "/": + if not beVals.get("active"): + continue else: - # We will get here if we don't find find any BE's. e.g - # if were are on UFS. - raise RuntimeError("recoveryDisabled") - - def get_new_be_name(self, new_bename=None, suffix=None): - """Create a new boot environment name.""" - - if new_bename == None: - new_bename = self.be_name - if suffix: - new_bename += suffix - - base, sep, rev = new_bename.rpartition("-") - if sep and rev.isdigit(): - maxrev = int(rev) + self.is_live_BE = True + else: + if beVals.get("mountpoint") != self.root: + continue + + # Set the needed BE components so snapshots + # and clones can be managed. + self.be_name = be_name + + self.dataset = beVals.get("dataset") + + # Let libbe provide the snapshot name + err, snapshot_name = be.beCreateSnapshot(self.be_name) + self.clone_dir = tempfile.mkdtemp() + + # Check first field for failure. + # 2nd field is the returned snapshot name + if err == 0: + self.snapshot_name = snapshot_name + # we require BootEnv to be initialised within + # the context of a history operation, i.e. + # after img.history.operation_name has been set. + img.history.operation_snapshot = snapshot_name + else: + logger.error( + _( + "pkg: unable to create an auto " + "snapshot. pkg recovery is disabled." + ) + ) + raise RuntimeError("recoveryDisabled") + self.is_valid = True + break + + else: + # We will get here if we don't find find any BE's. e.g + # if were are on UFS. 
+ raise RuntimeError("recoveryDisabled") + + def get_new_be_name(self, new_bename=None, suffix=None): + """Create a new boot environment name.""" + + if new_bename == None: + new_bename = self.be_name + if suffix: + new_bename += suffix + + base, sep, rev = new_bename.rpartition("-") + if sep and rev.isdigit(): + maxrev = int(rev) + else: + # new_bename does not include a numerical suffix + # so start with the bare name + base = new_bename + maxrev = 0 + + # List all BEs, cycle through the names and find the + # one with the same basename as new_bename which has the + # highest revision. This revision will be the starting + # point for building the new BE name so that gaps in the + # numbering are not filled. + + for d in self.beList: + oben = d.get("orig_be_name", None) + if not oben: + continue + nbase, sep, nrev = oben.rpartition("-") + if not sep or nbase != base or not nrev.isdigit(): + continue + maxrev = max(int(nrev), maxrev) + + good = False + num = maxrev + while not good: + if num > 0: + new_bename = "-".join((base, str(num))) + else: + new_bename = base + for d in self.beList: + oben = d.get("orig_be_name", None) + if not oben: + continue + if oben == new_bename: + # Already exists + break + else: + good = True + + num += 1 + + return new_bename + + def __store_image_state(self): + """Internal function used to preserve current image information + and history state to be restored later with __reset_image_state + if needed.""" + + # Preserve the current history information and state so that if + # boot environment operations fail, they can be written to the + # original image root, etc. + self.img.history.create_snapshot() + + def __reset_image_state(self, failure=False): + """Internal function intended to be used to reset the image + state, if needed, after the failure or success of boot + environment operations.""" + + if not self.img: + # Nothing to restore. + return + + if self.root != self.img.root: + if failure: + # Since the image root changed and the operation + # was not successful, restore the original + # history and state information so that it can + # be recorded in the original image root. This + # needs to be done before the image root is + # reset since it might fail. + self.img.history.restore_snapshot() + + self.img.history.discard_snapshot() + + # After the completion of an operation that has changed + # the image root, it needs to be reset back to its + # original value so that the client will read and write + # information using the correct location (this is + # especially important for bootenv operations). 
+ self.img.find_root(self.root) + else: + self.img.history.discard_snapshot() + + def exists(self): + """Return true if this object represents a valid BE.""" + + return self.is_valid + + @staticmethod + def libbe_exists(): + return True + + @staticmethod + def check_verify(): + return hasattr(be, "beVerifyBEName") + + @staticmethod + def split_be_entry(bee): + name = bee.get("orig_be_name") + return ( + name, + bee.get("active"), + bee.get("active_boot"), + bee.get("space_used"), + bee.get("date"), + ) + + @staticmethod + def copy_be(src_be_name, dst_be_name): + ret, be_name_clone, not_used = be.beCopy(dst_be_name, src_be_name) + if ret != 0: + raise api_errors.UnableToCopyBE() + + @staticmethod + def rename_be(orig_name, new_name): + return be.beRename(orig_name, new_name) + + @staticmethod + def destroy_be(be_name): + return be.beDestroy(be_name, 1, True) + + @staticmethod + def cleanup_be(be_name): + be_list = BootEnv.get_be_list() + for elem in be_list: + if "orig_be_name" in elem and be_name == elem["orig_be_name"]: + # Force unmount the be and destroy it. + # Ignore errors. + try: + if elem.get("mounted"): + BootEnv.unmount_be(be_name, force=True) + shutil.rmtree( + elem.get("mountpoint"), ignore_errors=True + ) + BootEnv.destroy_be(be_name) + except Exception as e: + pass + break + + @staticmethod + def mount_be(be_name, mntpt, include_bpool=False): + return be.beMount(be_name, mntpt) + + @staticmethod + def unmount_be(be_name, force=False): + return be.beUnmount(be_name, force=force) + + @staticmethod + def set_default_be(be_name): + return be.beActivate(be_name) + + @staticmethod + def check_be_name(be_name): + try: + if be_name is None: + return + + if be.beVerifyBEName(be_name) != 0: + raise api_errors.InvalidBENameException(be_name) + + beList = BootEnv.get_be_list() + + # If there is already a BE with the same name as + # be_name, then raise an exception. + if be_name in (be.get("orig_be_name") for be in beList): + raise api_errors.DuplicateBEName(be_name) + except AttributeError: + raise api_errors.BENamingNotSupported(be_name) + + @staticmethod + def get_be_list(raise_error=False): + # This check enables the test suite to run much more quickly. + # It is necessary because pkg5unittest (eventually) imports this + # module before the environment is sanitized. + if "PKG_NO_LIVE_ROOT" in os.environ: + return BootEnvNull.get_be_list() + + rc, beList = be.beList(nosnaps=True) + if not beList or rc != 0: + if raise_error: + # Happens e.g. in zones (for now) or live CD + # environment. + raise RuntimeError("nobootenvironments") + beList = [] + + return beList + + @staticmethod + def get_be_names(): + """Return a list of BE names.""" + return [ + be["orig_be_name"] + for be in BootEnv.get_be_list() + if "orig_be_name" in be + ] + + @staticmethod + def get_be_name(path): + """Looks for the name of the boot environment corresponding to + an image root, returning name and uuid""" + beList = BootEnv.get_be_list() + + for be in beList: + be_name = be.get("orig_be_name") + be_uuid = be.get("uuid_str") + + if not be_name or not be.get("mounted"): + continue + + # Check if we're operating on the live BE. + # If so it must also be active. If we are not + # operating on the live BE, then verify + # that the mountpoint of the BE matches + # the path argument passed in by the user. 
+ if path == "/": + if be.get("active"): + return be_name, be_uuid + else: + if be.get("mountpoint") == path: + return be_name, be_uuid + return None, None + + @staticmethod + def get_uuid_be_dic(): + """Return a dictionary of all boot environment names on the + system, keyed by uuid""" + beList = BootEnv.get_be_list() + uuid_bes = {} + for be in beList: + uuid_bes[be.get("uuid_str")] = be.get("orig_be_name") + return uuid_bes + + @staticmethod + def get_activated_be_name(bootnext=False): + """Gets the name of the currently activated boot environment. + If 'bootnext' is true, then also consider temporary (bootnext) + activations""" + try: + beList = BootEnv.get_be_list() + name = None + + for be in beList: + # don't look at active but unbootable BEs. + # (happens in zones when we have ZBEs + # associated with other global zone BEs.) + if be.get("active_unbootable", False): + continue + if not be.get("global_active", True): + continue + if bootnext and be.get("active_nextboot", False): + return be.get("orig_be_name") + if be.get("active_boot"): + name = be.get("orig_be_name") + if not bootnext: + break + return name + except AttributeError: + raise api_errors.BENamingNotSupported(be_name) + + @staticmethod + def get_active_be_name(): + try: + beList = BootEnv.get_be_list() + + for be in beList: + if be.get("active"): + return be.get("orig_be_name") + except AttributeError: + raise api_errors.BENamingNotSupported(be_name) + + def create_backup_be(self, be_name=None): + """Create a backup BE if the BE being modified is the live one. + + 'be_name' is an optional string indicating the name to use + for the new backup BE.""" + + self.check_be_name(be_name) + + if self.is_live_BE: + # Create a clone of the live BE, but do not mount or + # activate it. Do nothing with the returned snapshot + # name that is taken of the clone during beCopy. + ret, be_name_clone, not_used = be.beCopy() + if ret != 0: + raise api_errors.UnableToCopyBE() + + if not be_name: + be_name = self.get_new_be_name(suffix="-backup-1") + ret = be.beRename(be_name_clone, be_name) + if ret != 0: + raise api_errors.UnableToRenameBE(be_name_clone, be_name) + elif be_name is not None: + raise api_errors.BENameGivenOnDeadBE(be_name) + + def init_image_recovery(self, img, be_name=None): + """Initialize for an update. + If a be_name is given, validate it. + If we're operating on a live BE then clone the + live BE and operate on the clone. + If we're operating on a non-live BE we use + the already created snapshot""" + + self.img = img + + if self.is_live_BE: + # Create a clone of the live BE and mount it. + self.destroy_snapshot() + + self.check_be_name(be_name) + + # Do nothing with the returned snapshot name + # that is taken of the clone during beCopy. + ret, self.be_name_clone, not_used = be.beCopy() + if ret != 0: + raise api_errors.UnableToCopyBE() + if be_name: + ret = be.beRename(self.be_name_clone, be_name) + if ret == 0: + self.be_name_clone = be_name else: - # new_bename does not include a numerical suffix - # so start with the bare name - base = new_bename - maxrev = 0 - - # List all BEs, cycle through the names and find the - # one with the same basename as new_bename which has the - # highest revision. This revision will be the starting - # point for building the new BE name so that gaps in the - # numbering are not filled. 
- - for d in self.beList: - oben = d.get("orig_be_name", None) - if not oben: - continue - nbase, sep, nrev = oben.rpartition("-") - if (not sep or nbase != base or - not nrev.isdigit()): - continue - maxrev = max(int(nrev), maxrev) - - good = False - num = maxrev - while not good: - if num > 0: - new_bename = "-".join((base, str(num))) - else: - new_bename = base - for d in self.beList: - oben = d.get("orig_be_name", None) - if not oben: - continue - if oben == new_bename: - # Already exists - break - else: - good = True - - num += 1 - - return new_bename - - def __store_image_state(self): - """Internal function used to preserve current image information - and history state to be restored later with __reset_image_state - if needed.""" - - # Preserve the current history information and state so that if - # boot environment operations fail, they can be written to the - # original image root, etc. - self.img.history.create_snapshot() - - def __reset_image_state(self, failure=False): - """Internal function intended to be used to reset the image - state, if needed, after the failure or success of boot - environment operations.""" - - if not self.img: - # Nothing to restore. - return - - if self.root != self.img.root: - if failure: - # Since the image root changed and the operation - # was not successful, restore the original - # history and state information so that it can - # be recorded in the original image root. This - # needs to be done before the image root is - # reset since it might fail. - self.img.history.restore_snapshot() - - self.img.history.discard_snapshot() - - # After the completion of an operation that has changed - # the image root, it needs to be reset back to its - # original value so that the client will read and write - # information using the correct location (this is - # especially important for bootenv operations). - self.img.find_root(self.root) + raise api_errors.UnableToRenameBE( + self.be_name_clone, be_name + ) + if be.beMount(self.be_name_clone, self.clone_dir) != 0: + raise api_errors.UnableToMountBE( + self.be_name_clone, self.clone_dir + ) + + # record the UUID of this cloned boot environment + not_used, self.be_name_clone_uuid = BootEnv.get_be_name( + self.clone_dir + ) + + # Set the image to our new mounted BE. + img.find_root(self.clone_dir, exact_match=True) + elif be_name is not None: + raise api_errors.BENameGivenOnDeadBE(be_name) + + def update_boot_archive(self): + """Rebuild the boot archive in the current image. + Just report errors; failure of pkg command is not needed, + and bootadm problems should be rare.""" + cmd = ["/sbin/bootadm", "update-archive", "-R", self.img.get_root()] + + try: + ret = subprocess.call( + cmd, stdout=open(os.devnull), stderr=subprocess.STDOUT + ) + except OSError as e: + logger.error( + _( + "pkg: A system error {e} was " "caught executing {cmd}" + ).format(e=e, cmd=" ".join(cmd)) + ) + return + + if ret: + logger.error( + _( + "pkg: '{cmd}' failed. \nwith " "a return code of {ret:d}." + ).format(cmd=" ".join(cmd), ret=ret) + ) + + def activate_image(self, set_active=True): + """Activate a clone of the BE being operated on. + If were operating on a non-live BE then + destroy the snapshot. + + 'set_active' is an optional argument indicating whether the new + BE (if created) should be set as the active one on next boot. + If 'set_active' is set to the string 'bootnext' then the + a temporary one-time activation will be performed. 
+ """ + + def activate_live_be(): + if set_active: + if set_active == "bootnext": + ret = be.beActivate(self.be_name_clone, temporary=1) else: - self.img.history.discard_snapshot() - - def exists(self): - - """Return true if this object represents a valid BE.""" - - return self.is_valid - - @staticmethod - def libbe_exists(): - return True - - @staticmethod - def check_verify(): - return hasattr(be, "beVerifyBEName") - - @staticmethod - def split_be_entry(bee): - name = bee.get("orig_be_name") - return (name, bee.get("active"), bee.get("active_boot"), - bee.get("space_used"), bee.get("date")) - - @staticmethod - def copy_be(src_be_name, dst_be_name): - ret, be_name_clone, not_used = be.beCopy( - dst_be_name, src_be_name) + ret = be.beActivate(self.be_name_clone) if ret != 0: - raise api_errors.UnableToCopyBE() - - @staticmethod - def rename_be(orig_name, new_name): - return be.beRename(orig_name, new_name) - - @staticmethod - def destroy_be(be_name): - return be.beDestroy(be_name, 1, True) - - @staticmethod - def cleanup_be(be_name): - be_list = BootEnv.get_be_list() - for elem in be_list: - if "orig_be_name" in elem and be_name == \ - elem["orig_be_name"]: - # Force unmount the be and destroy it. - # Ignore errors. - try: - if elem.get("mounted"): - BootEnv.unmount_be( - be_name, force=True) - shutil.rmtree(elem.get( - "mountpoint"), - ignore_errors=True) - BootEnv.destroy_be( - be_name) - except Exception as e: - pass - break - - @staticmethod - def mount_be(be_name, mntpt, include_bpool=False): - return be.beMount(be_name, mntpt) - - @staticmethod - def unmount_be(be_name, force=False): - return be.beUnmount(be_name, force=force) - - @staticmethod - def set_default_be(be_name): - return be.beActivate(be_name) - - @staticmethod - def check_be_name(be_name): - try: - if be_name is None: - return - - if be.beVerifyBEName(be_name) != 0: - raise api_errors.InvalidBENameException(be_name) - - beList = BootEnv.get_be_list() - - # If there is already a BE with the same name as - # be_name, then raise an exception. - if be_name in (be.get("orig_be_name") for be in beList): - raise api_errors.DuplicateBEName(be_name) - except AttributeError: - raise api_errors.BENamingNotSupported(be_name) - - @staticmethod - def get_be_list(raise_error=False): - # This check enables the test suite to run much more quickly. - # It is necessary because pkg5unittest (eventually) imports this - # module before the environment is sanitized. - if "PKG_NO_LIVE_ROOT" in os.environ: - return BootEnvNull.get_be_list() - - rc, beList = be.beList(nosnaps=True) - if not beList or rc != 0: - if raise_error: - # Happens e.g. in zones (for now) or live CD - # environment. - raise RuntimeError("nobootenvironments") - beList = [] - - return beList - - @staticmethod - def get_be_names(): - """Return a list of BE names.""" - return [ - be['orig_be_name'] for be in BootEnv.get_be_list() - if 'orig_be_name' in be - ] - - @staticmethod - def get_be_name(path): - """Looks for the name of the boot environment corresponding to - an image root, returning name and uuid """ - beList = BootEnv.get_be_list() - - for be in beList: - be_name = be.get("orig_be_name") - be_uuid = be.get("uuid_str") - - if not be_name or not be.get("mounted"): - continue - - # Check if we're operating on the live BE. - # If so it must also be active. If we are not - # operating on the live BE, then verify - # that the mountpoint of the BE matches - # the path argument passed in by the user. 
- if path == '/': - if be.get("active"): - return be_name, be_uuid - else: - if be.get("mountpoint") == path: - return be_name, be_uuid - return None, None - - @staticmethod - def get_uuid_be_dic(): - """Return a dictionary of all boot environment names on the - system, keyed by uuid""" - beList = BootEnv.get_be_list() - uuid_bes = {} - for be in beList: - uuid_bes[be.get("uuid_str")] = be.get("orig_be_name") - return uuid_bes - - @staticmethod - def get_activated_be_name(bootnext=False): - """Gets the name of the currently activated boot environment. - If 'bootnext' is true, then also consider temporary (bootnext) - activations """ - try: - beList = BootEnv.get_be_list() - name = None - - for be in beList: - # don't look at active but unbootable BEs. - # (happens in zones when we have ZBEs - # associated with other global zone BEs.) - if be.get("active_unbootable", False): - continue - if not be.get("global_active", True): - continue - if (bootnext and - be.get("active_nextboot", False)): - return be.get("orig_be_name") - if be.get("active_boot"): - name = be.get("orig_be_name") - if not bootnext: - break - return name - except AttributeError: - raise api_errors.BENamingNotSupported(be_name) - - @staticmethod - def get_active_be_name(): - try: - beList = BootEnv.get_be_list() - - for be in beList: - if be.get("active"): - return be.get("orig_be_name") - except AttributeError: - raise api_errors.BENamingNotSupported(be_name) - - def create_backup_be(self, be_name=None): - """Create a backup BE if the BE being modified is the live one. - - 'be_name' is an optional string indicating the name to use - for the new backup BE.""" - - self.check_be_name(be_name) - - if self.is_live_BE: - # Create a clone of the live BE, but do not mount or - # activate it. Do nothing with the returned snapshot - # name that is taken of the clone during beCopy. - ret, be_name_clone, not_used = be.beCopy() - if ret != 0: - raise api_errors.UnableToCopyBE() - - if not be_name: - be_name = self.get_new_be_name( - suffix="-backup-1") - ret = be.beRename(be_name_clone, be_name) - if ret != 0: - raise api_errors.UnableToRenameBE( - be_name_clone, be_name) - elif be_name is not None: - raise api_errors.BENameGivenOnDeadBE(be_name) - - def init_image_recovery(self, img, be_name=None): - - """Initialize for an update. - If a be_name is given, validate it. - If we're operating on a live BE then clone the - live BE and operate on the clone. - If we're operating on a non-live BE we use - the already created snapshot""" - - self.img = img - - if self.is_live_BE: - # Create a clone of the live BE and mount it. - self.destroy_snapshot() - - self.check_be_name(be_name) - - # Do nothing with the returned snapshot name - # that is taken of the clone during beCopy. - ret, self.be_name_clone, not_used = be.beCopy() - if ret != 0: - raise api_errors.UnableToCopyBE() - if be_name: - ret = be.beRename(self.be_name_clone, be_name) - if ret == 0: - self.be_name_clone = be_name - else: - raise api_errors.UnableToRenameBE( - self.be_name_clone, be_name) - if be.beMount(self.be_name_clone, self.clone_dir) != 0: - raise api_errors.UnableToMountBE( - self.be_name_clone, self.clone_dir) - - # record the UUID of this cloned boot environment - not_used, self.be_name_clone_uuid = \ - BootEnv.get_be_name(self.clone_dir) - - # Set the image to our new mounted BE. 
- img.find_root(self.clone_dir, exact_match=True) - elif be_name is not None: - raise api_errors.BENameGivenOnDeadBE(be_name) - - def update_boot_archive(self): - """Rebuild the boot archive in the current image. - Just report errors; failure of pkg command is not needed, - and bootadm problems should be rare.""" - cmd = [ - "/sbin/bootadm", "update-archive", "-R", - self.img.get_root() - ] - - try: - ret = subprocess.call(cmd, - stdout = open(os.devnull), stderr=subprocess.STDOUT) - except OSError as e: - logger.error(_("pkg: A system error {e} was " - "caught executing {cmd}").format(e=e, - cmd=" ".join(cmd))) - return - - if ret: - logger.error(_("pkg: '{cmd}' failed. \nwith " - "a return code of {ret:d}.").format( - cmd=" ".join(cmd), ret=ret)) - - def activate_image(self, set_active=True): - """Activate a clone of the BE being operated on. - If were operating on a non-live BE then - destroy the snapshot. - - 'set_active' is an optional argument indicating whether the new - BE (if created) should be set as the active one on next boot. - If 'set_active' is set to the string 'bootnext' then the - a temporary one-time activation will be performed. - """ - - def activate_live_be(): - if set_active: - if set_active == "bootnext": - ret = be.beActivate(self.be_name_clone, - temporary=1) - else: - ret = be.beActivate(self.be_name_clone) - if ret != 0: - logger.error( - _("pkg: unable to activate " - "{0}").format(self.be_name_clone)) - return - - # Consider the last operation a success, and log it as - # ending here so that it will be recorded in the new - # image's history. - self.img.history.operation_new_be = self.be_name_clone - self.img.history.operation_new_be_uuid = self.be_name_clone_uuid - self.img.history.log_operation_end(release_notes= - self.img.imageplan.pd.release_notes_name) - - if be.beUnmount(self.be_name_clone) != 0: - logger.error(_("unable to unmount BE " - "{be_name} mounted at {be_path}").format( - be_name=self.be_name_clone, - be_path=self.clone_dir)) - return - - os.rmdir(self.clone_dir) - - if set_active: - logger.info(_(""" + logger.error( + _("pkg: unable to activate " "{0}").format( + self.be_name_clone + ) + ) + return + + # Consider the last operation a success, and log it as + # ending here so that it will be recorded in the new + # image's history. + self.img.history.operation_new_be = self.be_name_clone + self.img.history.operation_new_be_uuid = self.be_name_clone_uuid + self.img.history.log_operation_end( + release_notes=self.img.imageplan.pd.release_notes_name + ) + + if be.beUnmount(self.be_name_clone) != 0: + logger.error( + _( + "unable to unmount BE " "{be_name} mounted at {be_path}" + ).format(be_name=self.be_name_clone, be_path=self.clone_dir) + ) + return + + os.rmdir(self.clone_dir) + + if set_active: + logger.info( + _( + """ A clone of {be_name} exists and has been updated and activated. On the next boot the Boot Environment {be_name_clone} will be mounted on '/'. Reboot when ready to switch to this updated BE. *** Reboot required *** New BE: {be_name_clone} -""").format(**self.__dict__)) - else: - logger.info(_(""" +""" + ).format(**self.__dict__) + ) + else: + logger.info( + _( + """ A clone of {be_name} exists and has been updated. 
To set the new BE as the active one on next boot, execute the following command as a privileged user and reboot when ready to switch to the updated BE: beadm activate {be_name_clone} -""").format(**self.__dict__)) - - def activate_be(): - # Delete the snapshot that was taken before we - # updated the image and the boot archive. - logger.info(_("{0} has been updated " - "successfully").format(self.be_name)) - - os.rmdir(self.clone_dir) - self.destroy_snapshot() - self.img.history.operation_snapshot = None - - self.__store_image_state() - - # Ensure cache is flushed before activating and unmounting BE. - self.img.cleanup_cached_content(progtrack=self.progress_tracker) - +""" + ).format(**self.__dict__) + ) + + def activate_be(): + # Delete the snapshot that was taken before we + # updated the image and the boot archive. + logger.info( + _("{0} has been updated " "successfully").format(self.be_name) + ) + + os.rmdir(self.clone_dir) + self.destroy_snapshot() + self.img.history.operation_snapshot = None + + self.__store_image_state() + + # Ensure cache is flushed before activating and unmounting BE. + self.img.cleanup_cached_content(progtrack=self.progress_tracker) + + relock = False + if self.img.locked: + # This is necessary since the lock will + # prevent the boot environment from being + # unmounted during activation. Normally, + # locking for the image is handled + # automatically. + relock = True + self.img.unlock() + + caught_exception = None + + try: + if self.is_live_BE: + activate_live_be() + else: + activate_be() + except Exception as e: + caught_exception = e + if relock: + # Re-lock be image. relock = False - if self.img.locked: - # This is necessary since the lock will - # prevent the boot environment from being - # unmounted during activation. Normally, - # locking for the image is handled - # automatically. - relock = True - self.img.unlock() - - caught_exception = None - - try: - if self.is_live_BE: - activate_live_be() - else: - activate_be() - except Exception as e: - caught_exception = e - if relock: - # Re-lock be image. - relock = False - self.img.lock() - - self.__reset_image_state(failure=caught_exception) - if relock: - # Activation was successful so the be image was - # unmounted and the parent image must be re-locked. - self.img.lock() - - if caught_exception: - self.img.history.log_operation_error(error=e) - raise caught_exception - - def restore_image(self): - """Restore a failed update attempt.""" - - # flush() is necessary here so that the warnings get printed - # on a new line. - if self.progress_tracker: - self.progress_tracker.flush() - - self.__reset_image_state(failure=True) - - # Leave the clone around for debugging purposes if we're - # operating on the live BE. - if self.is_live_BE: - logger.error(_("The running system has not been " - "modified. Modifications were only made to a clone " - "({0}) of the running system. This clone is " - "mounted at {1} should you wish to inspect " - "it.").format( - self.be_name_clone, self.clone_dir)) - - else: - # Rollback and destroy the snapshot. - try: - if be.beRollback(self.be_name, - self.snapshot_name) != 0: - logger.error(_("pkg: unable to " - "rollback BE {0} and restore " - "image").format(self.be_name)) - - self.destroy_snapshot() - os.rmdir(self.clone_dir) - except Exception as e: - self.img.history.log_operation_error(error=e) - raise e - - logger.error(_("{bename} failed to be updated. 
No " - "changes have been made to {bename}.").format( - bename=self.be_name)) - - def destroy_snapshot(self): - - """Destroy a snapshot of the BE being operated on. - Note that this will destroy the last created - snapshot and does not support destroying - multiple snapshots. Create another instance of - BootEnv to manage multiple snapshots.""" - - if be.beDestroySnapshot(self.be_name, self.snapshot_name) != 0: - logger.error(_("pkg: unable to destroy snapshot " - "{0}").format(self.snapshot_name)) - - def restore_install_uninstall(self): - - """Restore a failed install or uninstall attempt. - Clone the snapshot, mount the BE and - notify user of its existence. Rollback - if not operating on a live BE""" - - # flush() is necessary here so that the warnings get printed - # on a new line. - if self.progress_tracker: - self.progress_tracker.flush() - - if self.is_live_BE: - # Create a new BE based on the previously taken - # snapshot. - - ret, self.be_name_clone, not_used = \ - be.beCopy(None, self.be_name, self.snapshot_name) - if ret != 0: - # If the above beCopy() failed we will try it - # without expecting the BE clone name to be - # returned by libbe. We do this in case an old - # version of libbe is on a system with - # a new version of pkg. - self.be_name_clone = self.be_name + "_" + \ - self.snapshot_name - - ret, not_used, not_used2 = \ - be.beCopy(self.be_name_clone, \ - self.be_name, self.snapshot_name) - if ret != 0: - logger.error(_("pkg: unable to create " - "BE {0}").format( - self.be_name_clone)) - return - - if be.beMount(self.be_name_clone, self.clone_dir) != 0: - logger.error(_("pkg: unable to mount BE " - "{name} on {clone_dir}").format( - name=self.be_name_clone, - clone_dir=self.clone_dir)) - return - - logger.error(_("The Boot Environment {name} failed " - "to be updated. A snapshot was taken before the " - "failed attempt and is mounted here {clone_dir}. " - "Use 'beadm unmount {clone_name}' and then " - "'beadm activate {clone_name}' if you wish to " - "boot to this BE.").format(name=self.be_name, - clone_dir=self.clone_dir, - clone_name=self.be_name_clone)) - else: - if be.beRollback(self.be_name, self.snapshot_name) != 0: - logger.error("pkg: unable to rollback BE " - "{0}".format(self.be_name)) - - self.destroy_snapshot() - - logger.error(_("The Boot Environment {bename} failed " - "to be updated. A snapshot was taken before the " - "failed attempt and has been restored so no " - "changes have been made to {bename}.").format( - bename=self.be_name)) - - def activate_install_uninstall(self): - """Activate an install/uninstall attempt. Which just means - destroy the snapshot for the live and non-live case.""" + self.img.lock() + + self.__reset_image_state(failure=caught_exception) + if relock: + # Activation was successful so the be image was + # unmounted and the parent image must be re-locked. + self.img.lock() + + if caught_exception: + self.img.history.log_operation_error(error=e) + raise caught_exception + + def restore_image(self): + """Restore a failed update attempt.""" + + # flush() is necessary here so that the warnings get printed + # on a new line. + if self.progress_tracker: + self.progress_tracker.flush() + + self.__reset_image_state(failure=True) + + # Leave the clone around for debugging purposes if we're + # operating on the live BE. + if self.is_live_BE: + logger.error( + _( + "The running system has not been " + "modified. Modifications were only made to a clone " + "({0}) of the running system. 
This clone is " + "mounted at {1} should you wish to inspect " + "it." + ).format(self.be_name_clone, self.clone_dir) + ) + + else: + # Rollback and destroy the snapshot. + try: + if be.beRollback(self.be_name, self.snapshot_name) != 0: + logger.error( + _( + "pkg: unable to " + "rollback BE {0} and restore " + "image" + ).format(self.be_name) + ) self.destroy_snapshot() + os.rmdir(self.clone_dir) + except Exception as e: + self.img.history.log_operation_error(error=e) + raise e + + logger.error( + _( + "{bename} failed to be updated. No " + "changes have been made to {bename}." + ).format(bename=self.be_name) + ) + + def destroy_snapshot(self): + """Destroy a snapshot of the BE being operated on. + Note that this will destroy the last created + snapshot and does not support destroying + multiple snapshots. Create another instance of + BootEnv to manage multiple snapshots.""" + + if be.beDestroySnapshot(self.be_name, self.snapshot_name) != 0: + logger.error( + _("pkg: unable to destroy snapshot " "{0}").format( + self.snapshot_name + ) + ) + + def restore_install_uninstall(self): + """Restore a failed install or uninstall attempt. + Clone the snapshot, mount the BE and + notify user of its existence. Rollback + if not operating on a live BE""" + + # flush() is necessary here so that the warnings get printed + # on a new line. + if self.progress_tracker: + self.progress_tracker.flush() + + if self.is_live_BE: + # Create a new BE based on the previously taken + # snapshot. + + ret, self.be_name_clone, not_used = be.beCopy( + None, self.be_name, self.snapshot_name + ) + if ret != 0: + # If the above beCopy() failed we will try it + # without expecting the BE clone name to be + # returned by libbe. We do this in case an old + # version of libbe is on a system with + # a new version of pkg. + self.be_name_clone = self.be_name + "_" + self.snapshot_name + + ret, not_used, not_used2 = be.beCopy( + self.be_name_clone, self.be_name, self.snapshot_name + ) + if ret != 0: + logger.error( + _("pkg: unable to create " "BE {0}").format( + self.be_name_clone + ) + ) + return + + if be.beMount(self.be_name_clone, self.clone_dir) != 0: + logger.error( + _( + "pkg: unable to mount BE " "{name} on {clone_dir}" + ).format(name=self.be_name_clone, clone_dir=self.clone_dir) + ) + return + + logger.error( + _( + "The Boot Environment {name} failed " + "to be updated. A snapshot was taken before the " + "failed attempt and is mounted here {clone_dir}. " + "Use 'beadm unmount {clone_name}' and then " + "'beadm activate {clone_name}' if you wish to " + "boot to this BE." + ).format( + name=self.be_name, + clone_dir=self.clone_dir, + clone_name=self.be_name_clone, + ) + ) + else: + if be.beRollback(self.be_name, self.snapshot_name) != 0: + logger.error( + "pkg: unable to rollback BE " "{0}".format(self.be_name) + ) + + self.destroy_snapshot() + + logger.error( + _( + "The Boot Environment {bename} failed " + "to be updated. A snapshot was taken before the " + "failed attempt and has been restored so no " + "changes have been made to {bename}." + ).format(bename=self.be_name) + ) + + def activate_install_uninstall(self): + """Activate an install/uninstall attempt. 
Which just means + destroy the snapshot for the live and non-live case.""" + + self.destroy_snapshot() class BootEnvNull(object): - """BootEnvNull is a class that gets used when libbe doesn't exist.""" + """BootEnvNull is a class that gets used when libbe doesn't exist.""" - def __init__(self, img, progress_tracker=None): - pass + def __init__(self, img, progress_tracker=None): + pass - @staticmethod - def update_boot_archive(): - pass + @staticmethod + def update_boot_archive(): + pass - @staticmethod - def exists(): - return False + @staticmethod + def exists(): + return False - @staticmethod - def libbe_exists(): - return False + @staticmethod + def libbe_exists(): + return False - @staticmethod - def check_verify(): - return False + @staticmethod + def check_verify(): + return False - @staticmethod - def split_be_entry(bee): - return None + @staticmethod + def split_be_entry(bee): + return None - @staticmethod - def copy_be(src_be_name, dst_be_name): - pass + @staticmethod + def copy_be(src_be_name, dst_be_name): + pass - @staticmethod - def rename_be(orig_name, new_name): - pass + @staticmethod + def rename_be(orig_name, new_name): + pass - @staticmethod - def destroy_be(be_name): - pass + @staticmethod + def destroy_be(be_name): + pass - @staticmethod - def cleanup_be(be_name): - pass + @staticmethod + def cleanup_be(be_name): + pass - @staticmethod - def mount_be(be_name, mntpt, include_bpool=False): - return None + @staticmethod + def mount_be(be_name, mntpt, include_bpool=False): + return None - @staticmethod - def unmount_be(be_name, force=False): - return None + @staticmethod + def unmount_be(be_name, force=False): + return None - @staticmethod - def set_default_be(be_name): - pass + @staticmethod + def set_default_be(be_name): + pass - @staticmethod - def check_be_name(be_name): - if be_name: - raise api_errors.BENamingNotSupported(be_name) + @staticmethod + def check_be_name(be_name): + if be_name: + raise api_errors.BENamingNotSupported(be_name) - @staticmethod - def get_be_list(): - return [] + @staticmethod + def get_be_list(): + return [] - @staticmethod - def get_be_names(): - return [] + @staticmethod + def get_be_names(): + return [] - @staticmethod - def get_be_name(path): - return None, None + @staticmethod + def get_be_name(path): + return None, None - @staticmethod - def get_uuid_be_dic(): - return misc.EmptyDict + @staticmethod + def get_uuid_be_dic(): + return misc.EmptyDict - @staticmethod - def get_activated_be_name(bootnext=False): - pass + @staticmethod + def get_activated_be_name(bootnext=False): + pass - @staticmethod - def get_active_be_name(): - pass + @staticmethod + def get_active_be_name(): + pass - @staticmethod - def get_new_be_name(new_bename=None, suffix=None): - pass + @staticmethod + def get_new_be_name(new_bename=None, suffix=None): + pass - @staticmethod - def create_backup_be(be_name=None): - if be_name is not None: - raise api_errors.BENameGivenOnDeadBE(be_name) + @staticmethod + def create_backup_be(be_name=None): + if be_name is not None: + raise api_errors.BENameGivenOnDeadBE(be_name) - @staticmethod - def init_image_recovery(img, be_name=None): - if be_name is not None: - raise api_errors.BENameGivenOnDeadBE(be_name) + @staticmethod + def init_image_recovery(img, be_name=None): + if be_name is not None: + raise api_errors.BENameGivenOnDeadBE(be_name) - @staticmethod - def activate_image(): - pass + @staticmethod + def activate_image(): + pass + + @staticmethod + def restore_image(): + pass - @staticmethod - def restore_image(): - pass + 
@staticmethod + def destroy_snapshot(): + pass - @staticmethod - def destroy_snapshot(): - pass + @staticmethod + def restore_install_uninstall(): + pass - @staticmethod - def restore_install_uninstall(): - pass + @staticmethod + def activate_install_uninstall(): + pass - @staticmethod - def activate_install_uninstall(): - pass if "be" not in locals(): - BootEnv = BootEnvNull + BootEnv = BootEnvNull # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/client_api.py b/src/modules/client/client_api.py index fddd26c1e..f5007456c 100644 --- a/src/modules/client/client_api.py +++ b/src/modules/client/client_api.py @@ -61,12 +61,22 @@ import pkg.version as version from pkg.client import global_settings -from pkg.client.api import (IMG_TYPE_ENTIRE, IMG_TYPE_PARTIAL, - IMG_TYPE_USER, RESULT_CANCELED, RESULT_FAILED_BAD_REQUEST, - RESULT_FAILED_CONFIGURATION, RESULT_FAILED_CONSTRAINED, - RESULT_FAILED_LOCKED, RESULT_FAILED_STORAGE, RESULT_NOTHING_TO_DO, - RESULT_SUCCEEDED, RESULT_FAILED_TRANSPORT, RESULT_FAILED_UNKNOWN, - RESULT_FAILED_OUTOFMEMORY) +from pkg.client.api import ( + IMG_TYPE_ENTIRE, + IMG_TYPE_PARTIAL, + IMG_TYPE_USER, + RESULT_CANCELED, + RESULT_FAILED_BAD_REQUEST, + RESULT_FAILED_CONFIGURATION, + RESULT_FAILED_CONSTRAINED, + RESULT_FAILED_LOCKED, + RESULT_FAILED_STORAGE, + RESULT_NOTHING_TO_DO, + RESULT_SUCCEEDED, + RESULT_FAILED_TRANSPORT, + RESULT_FAILED_UNKNOWN, + RESULT_FAILED_OUTOFMEMORY, +) from pkg.client.debugvalues import DebugValues from pkg.client.pkgdefs import * from pkg.misc import EmptyI, msg, emsg, PipeError @@ -79,1096 +89,1282 @@ def _strify(input): - """Convert unicode string into byte string in Python 2 and convert - bytes string into unicode string in Python 3. This will be used by json - loads function.""" - - if isinstance(input, dict): - return dict([(_strify(key), _strify(value)) for key, value in - six.iteritems(input)]) - elif isinstance(input, list): - return [_strify(element) for element in input] - elif isinstance(input, (six.string_types, bytes)): - return misc.force_str(input, "utf-8") - else: - return input + """Convert unicode string into byte string in Python 2 and convert + bytes string into unicode string in Python 3. This will be used by json + loads function.""" + + if isinstance(input, dict): + return dict( + [ + (_strify(key), _strify(value)) + for key, value in six.iteritems(input) + ] + ) + elif isinstance(input, list): + return [_strify(element) for element in input] + elif isinstance(input, (six.string_types, bytes)): + return misc.force_str(input, "utf-8") + else: + return input + def _get_pkg_input_schema(subcommand, opts_mapping=misc.EmptyDict): - """Get the input schema for pkg subcommand.""" + """Get the input schema for pkg subcommand.""" - # Return None if the subcommand is not defined. - if subcommand not in cmds: - return None + # Return None if the subcommand is not defined. 
+ if subcommand not in cmds: + return None + + props = {} + data_schema = __get_pkg_input_schema(subcommand, opts_mapping=opts_mapping) + props.update(data_schema) + schema = __construct_json_schema( + "{0} input schema".format(subcommand), properties=props + ) + return schema - props = {} - data_schema = __get_pkg_input_schema(subcommand, - opts_mapping=opts_mapping) - props.update(data_schema) - schema = __construct_json_schema("{0} input schema".format(subcommand), - properties=props) - return schema def _get_pkg_output_schema(subcommand): - """Get the output schema for pkg subcommand.""" + """Get the output schema for pkg subcommand.""" + + # Return None if the subcommand is not defined. + if subcommand not in cmds: + return None + + props = { + "status": {"type": "number"}, + "errors": {"type": "array", "items": __default_error_json_schema()}, + } + required = ["status"] + data_schema = cmds[subcommand][1]() + if data_schema: + props["data"] = data_schema + schema = __construct_json_schema( + "{0} output schema".format(subcommand), + properties=props, + required=required, + ) + return schema - # Return None if the subcommand is not defined. - if subcommand not in cmds: - return None - - props = {"status": {"type": "number"}, - "errors": {"type": "array", - "items": __default_error_json_schema() - } - } - required = ["status"] - data_schema = cmds[subcommand][1]() - if data_schema: - props["data"] = data_schema - schema = __construct_json_schema("{0} output schema".format( - subcommand), properties=props, required=required) - return schema def __get_pkg_input_schema(pkg_op, opts_mapping=misc.EmptyDict): - properties = {} - opts = options.pkg_op_opts[pkg_op] - if opts is not None: - for entry in opts: - if type(entry) != tuple: - continue - if len(entry) == 4: - opt, dummy_default, dummy_valid_args, \ - schema = entry - - if opt in opts_mapping: - optn = opts_mapping[opt] - if optn: - properties[optn] = schema - else: - properties[opt] = schema - else: - properties[opt] = schema - - arg_name = "pargs_json" - input_schema = \ - {arg_name: { - "type": "array", - "items": { - "type": "string" - } - }, - "opts_json": {"type": "object", - "properties": properties - }, - } - return input_schema + properties = {} + opts = options.pkg_op_opts[pkg_op] + if opts is not None: + for entry in opts: + if type(entry) != tuple: + continue + if len(entry) == 4: + opt, dummy_default, dummy_valid_args, schema = entry + + if opt in opts_mapping: + optn = opts_mapping[opt] + if optn: + properties[optn] = schema + else: + properties[opt] = schema + else: + properties[opt] = schema -def __pkg_list_output_schema(): - data_schema = {"type": "array", - "items": { - "type": "object", - "properties": { - "pub": {"type": "string"}, - "pkg": {"type": "string"}, - "version": {"type": "string"}, - "summary": {"type": "string"}, - "states": {"type": "array", - "items": {"type": "string"}} - } - } - } - return data_schema + arg_name = "pargs_json" + input_schema = { + arg_name: {"type": "array", "items": {"type": "string"}}, + "opts_json": {"type": "object", "properties": properties}, + } + return input_schema -def __get_plan_props(): - msg_payload_item = { + +def __pkg_list_output_schema(): + data_schema = { + "type": "array", + "items": { "type": "object", "properties": { - "msg_time": {"type": ["null", "string"]}, - "msg_level": {"type": ["null", "string"]}, - "msg_type": {"type": ["null", "string"]}, - "msg_text": {"type": ["null", "string"]} - } - } - plan_props = {"type": "object", - "properties": { - 
"image-name": {"type": ["null", "string"]}, - "affect-services": { - "type": "array", - "items": {} - }, - "licenses": { - "type": "array", - "items": [ + "pub": {"type": "string"}, + "pkg": {"type": "string"}, + "version": {"type": "string"}, + "summary": {"type": "string"}, + "states": {"type": "array", "items": {"type": "string"}}, + }, + }, + } + return data_schema + + +def __get_plan_props(): + msg_payload_item = { + "type": "object", + "properties": { + "msg_time": {"type": ["null", "string"]}, + "msg_level": {"type": ["null", "string"]}, + "msg_type": {"type": ["null", "string"]}, + "msg_text": {"type": ["null", "string"]}, + }, + } + plan_props = { + "type": "object", + "properties": { + "image-name": {"type": ["null", "string"]}, + "affect-services": {"type": "array", "items": {}}, + "licenses": { + "type": "array", + "items": [ { - "type": "array", - "items": [ - {"type": ["null", "string"]}, - {}, - { - "type": "array", - "items": [ + "type": "array", + "items": [ {"type": ["null", "string"]}, - {"type": ["null", "string"]}, - {"type": ["null", "string"]}, - {"type": ["null", "boolean"]}, - {"type": ["null", "boolean"]} - ]}] + {}, + { + "type": "array", + "items": [ + {"type": ["null", "string"]}, + {"type": ["null", "string"]}, + {"type": ["null", "string"]}, + {"type": ["null", "boolean"]}, + {"type": ["null", "boolean"]}, + ], + }, + ], }, - {"type": "array", - "items": [ - {"type": ["null", "string"]}, - {}, - {"type": "array", - "items": [ - {"type": ["null", "string"]}, - {"type": ["null", "string"]}, + { + "type": "array", + "items": [ {"type": ["null", "string"]}, - {"type": ["null", "boolean"]}, - {"type": ["null", "boolean"]} - ]}]}] - }, - "child-images": { - "type": "array", - "items": {} - }, - "change-mediators": { - "type": "array", - "items": {} - }, - "change-facets": { - "type": "array", - "items": {} - }, - "remove-packages": { - "type": "array", - "items": {} - }, - "be-name": { - "type": ["null", "string"], - }, - "space-available": { - "type": ["null", "number"], - }, - "boot-archive-rebuild": { - "type": ["null", "boolean"], - }, - "version": { - "type": ["null", "number"], - }, - "create-new-be": { - "type": ["null", "boolean"], - }, - "change-packages": { - "type": "array", - "items": {} - }, - "space-required": { - "type": ["null", "number"], - }, - "change-variants": { - "type": "array", - "items": {} - }, - "affect-packages": { - "type": "array", - "items": {} - }, - "change-editables": { - "type": "array", - "items": {} - }, - "create-backup-be": { - "type": ["null", "boolean"], - }, - "release-notes": { - "type": "array", - "items": {} - }, - "add-packages": { - "type": "array", - "items": { - "type": ["null", "string"] - }, - }, - "backup-be-name": { - "type": ["null", "string"] - }, - "activate-be": { - "type": ["null", "boolean"], - }, - # Because item id is non-deterministic, only properties that - # can be determined are listed here. 
- "item-messages": {"type": "object", - "properties": { - "unpackaged": {"type": "object", - "properties": { - "errors": {"type": "array", - "items": msg_payload_item}, - "warnings": {"type": "array", - "items": msg_payload_item} - } - } + {}, + { + "type": "array", + "items": [ + {"type": ["null", "string"]}, + {"type": ["null", "string"]}, + {"type": ["null", "string"]}, + {"type": ["null", "boolean"]}, + {"type": ["null", "boolean"]}, + ], + }, + ], + }, + ], + }, + "child-images": {"type": "array", "items": {}}, + "change-mediators": {"type": "array", "items": {}}, + "change-facets": {"type": "array", "items": {}}, + "remove-packages": {"type": "array", "items": {}}, + "be-name": { + "type": ["null", "string"], + }, + "space-available": { + "type": ["null", "number"], + }, + "boot-archive-rebuild": { + "type": ["null", "boolean"], + }, + "version": { + "type": ["null", "number"], + }, + "create-new-be": { + "type": ["null", "boolean"], + }, + "change-packages": {"type": "array", "items": {}}, + "space-required": { + "type": ["null", "number"], + }, + "change-variants": {"type": "array", "items": {}}, + "affect-packages": {"type": "array", "items": {}}, + "change-editables": {"type": "array", "items": {}}, + "create-backup-be": { + "type": ["null", "boolean"], + }, + "release-notes": {"type": "array", "items": {}}, + "add-packages": { + "type": "array", + "items": {"type": ["null", "string"]}, + }, + "backup-be-name": {"type": ["null", "string"]}, + "activate-be": { + "type": ["null", "boolean"], + }, + # Because item id is non-deterministic, only properties that + # can be determined are listed here. + "item-messages": { + "type": "object", + "properties": { + "unpackaged": { + "type": "object", + "properties": { + "errors": { + "type": "array", + "items": msg_payload_item, + }, + "warnings": { + "type": "array", + "items": msg_payload_item, + }, + }, } - } - } - } - return plan_props + }, + }, + }, + } + return plan_props + def __pkg_exact_install_output_schema(): - data_schema = {"type": "object", - "properties": { - "plan": __get_plan_props(), - "release_notes_url": {"type": ["null", "string"]} - } - } - return data_schema + data_schema = { + "type": "object", + "properties": { + "plan": __get_plan_props(), + "release_notes_url": {"type": ["null", "string"]}, + }, + } + return data_schema + def __pkg_install_output_schema(): - data_schema = {"type": "object", - "properties": { - "plan": __get_plan_props(), - "release_notes_url": {"type": ["null", "string"]} - } - } - return data_schema + data_schema = { + "type": "object", + "properties": { + "plan": __get_plan_props(), + "release_notes_url": {"type": ["null", "string"]}, + }, + } + return data_schema + def __pkg_update_output_schema(): - data_schema = {"type": "object", - "properties": { - "plan": __get_plan_props(), - "release_notes_url": {"type": ["null", "string"]} - } - } - return data_schema + data_schema = { + "type": "object", + "properties": { + "plan": __get_plan_props(), + "release_notes_url": {"type": ["null", "string"]}, + }, + } + return data_schema + def __pkg_uninstall_output_schema(): - data_schema = {"type": "object", - "properties": { - "plan": __get_plan_props(), - } - } - return data_schema + data_schema = { + "type": "object", + "properties": { + "plan": __get_plan_props(), + }, + } + return data_schema + def __pkg_publisher_set_output_schema(): - data_schema = {"type": "object", - "properties": { - "header": {"type": "string"}, - "added": {"type": "array", "items": {"type": "string"}}, - "updated": {"type": 
"array", "items": {"type": "string"}} - } - } - return data_schema + data_schema = { + "type": "object", + "properties": { + "header": {"type": "string"}, + "added": {"type": "array", "items": {"type": "string"}}, + "updated": {"type": "array", "items": {"type": "string"}}, + }, + } + return data_schema + def __pkg_publisher_unset_output_schema(): - return {} + return {} + def __pkg_publisher_output_schema(): - data_schema = {"type": "object", - "properties": { - "header": {"type": "array", "items": {"type": "string"}}, - "publishers": {"type": "array", "items": {"type": "array", - "items": {"type": ["null", "string"]}}}, - "publisher_details": {"type": "array", - "items": {"type": "object", "properties": { + data_schema = { + "type": "object", + "properties": { + "header": {"type": "array", "items": {"type": "string"}}, + "publishers": { + "type": "array", + "items": { + "type": "array", + "items": {"type": ["null", "string"]}, + }, + }, + "publisher_details": { + "type": "array", + "items": { + "type": "object", + "properties": { "Publisher": {"type": ["null", "string"]}, "Alias": {"type": ["null", "string"]}, "Client UUID": {"type": ["null", "string"]}, "Catalog Updated": {"type": ["null", "string"]}, "Enabled": {"type": ["null", "string"]}, "Properties": {"type": "object"}, - "origins": {"type": "array", - "items": {"type": "object"}}, - "mirrors": {"type": "array", - "items": {"type": "object"}}, + "origins": { + "type": "array", + "items": {"type": "object"}, + }, + "mirrors": { + "type": "array", + "items": {"type": "object"}, + }, "Approved CAs": {"type": "array"}, "Revoked CAs": {"type": "array"}, - }}} - } - } - return data_schema + }, + }, + }, + }, + } + return data_schema + def __pkg_info_output_schema(): - data_schema = {"type": "object", - "properties": { - "licenses": {"type": "array", "items": {"type": "array", - "items": {"type": ["null", "string"]}}}, - "package_attrs": {"type": "array", - "items": {"type": "array", "items": {"type": "array", - "items": [{"type": ["null", "string"]}, {"type": "array", - "items": {"type": ["null", "string"]}}]}}} - } - } - return data_schema + data_schema = { + "type": "object", + "properties": { + "licenses": { + "type": "array", + "items": { + "type": "array", + "items": {"type": ["null", "string"]}, + }, + }, + "package_attrs": { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "array", + "items": [ + {"type": ["null", "string"]}, + { + "type": "array", + "items": {"type": ["null", "string"]}, + }, + ], + }, + }, + }, + }, + } + return data_schema + def __pkg_verify_output_schema(): - data_schema = {"type": "object", - "properties": { - "plan": __get_plan_props(), - } - } - return data_schema + data_schema = { + "type": "object", + "properties": { + "plan": __get_plan_props(), + }, + } + return data_schema + def __pkg_fix_output_schema(): - data_schema = {"type": "object", - "properties": { - "plan": __get_plan_props(), - } - } - return data_schema + data_schema = { + "type": "object", + "properties": { + "plan": __get_plan_props(), + }, + } + return data_schema + def _format_update_error(e, errors_json=None): - # This message is displayed to the user whenever an - # ImageFormatUpdateNeeded exception is encountered. - if errors_json: - error = {"reason": str(e), "errtype": "format_update"} - errors_json.append(error) + # This message is displayed to the user whenever an + # ImageFormatUpdateNeeded exception is encountered. 
+ if errors_json: + error = {"reason": str(e), "errtype": "format_update"} + errors_json.append(error) + def _error_json(text, cmd=None, errors_json=None, errorType=None): - """Prepare an error message for json output. """ + """Prepare an error message for json output.""" - if not isinstance(text, six.string_types): - # Assume it's an object that can be stringified. - text = str(text) + if not isinstance(text, six.string_types): + # Assume it's an object that can be stringified. + text = str(text) - # If the message starts with whitespace, assume that it should come - # *before* the command-name prefix. - text_nows = text.lstrip() - ws = text[:len(text) - len(text_nows)] + # If the message starts with whitespace, assume that it should come + # *before* the command-name prefix. + text_nows = text.lstrip() + ws = text[: len(text) - len(text_nows)] - if cmd: - text_nows = "{0}: {1}".format(cmd, text_nows) - pkg_cmd = "pkg " - else: - pkg_cmd = "pkg: " + if cmd: + text_nows = "{0}: {1}".format(cmd, text_nows) + pkg_cmd = "pkg " + else: + pkg_cmd = "pkg: " + + if errors_json is not None: + error = {} + if errorType: + error["errtype"] = errorType + error["reason"] = ws + pkg_cmd + text_nows + errors_json.append(error) - if errors_json is not None: - error = {} - if errorType: - error["errtype"] = errorType - error["reason"] = ws + pkg_cmd + text_nows - errors_json.append(error) def _collect_proxy_config_errors(errors_json=None): - """If the user has configured http_proxy or https_proxy in the - environment, collect the values. Some transport errors are - not debuggable without this information handy.""" - - http_proxy = os.environ.get("http_proxy", None) - https_proxy = os.environ.get("https_proxy", None) - - if not http_proxy and not https_proxy: - return - - err = "\nThe following proxy configuration is set in the " \ - "environment:\n" - if http_proxy: - err += "http_proxy: {0}\n".format(http_proxy) - if https_proxy: - err += "https_proxy: {0}\n".format(https_proxy) - if errors_json: - errors_json.append({"reason": err}) + """If the user has configured http_proxy or https_proxy in the + environment, collect the values. Some transport errors are + not debuggable without this information handy.""" -def _get_fmri_args(api_inst, pargs, cmd=None, errors_json=None): - """ Convenience routine to check that input args are valid fmris. """ - - res = [] - errors = [] - for pat, err, pfmri, matcher in api_inst.parse_fmri_patterns(pargs): - if not err: - res.append((pat, err, pfmri, matcher)) - continue - if isinstance(err, version.VersionError): - # For version errors, include the pattern so - # that the user understands why it failed. - errors.append("Illegal FMRI '{0}': {1}".format(pat, - err)) - else: - # Including the pattern is redundant for other - # exceptions. 
- errors.append(err) - if errors: - _error_json("\n".join(str(e) for e in errors), - cmd=cmd, errors_json=errors_json) - return len(errors) == 0, res + http_proxy = os.environ.get("http_proxy", None) + https_proxy = os.environ.get("https_proxy", None) -def __default_error_json_schema(): - """Get the default error json schema.""" + if not http_proxy and not https_proxy: + return - error_schema = { - "type": "object", - "properties": { - "errtype": {"type": "string", - "enum": ["format_update", "catalog_refresh", - "catalog_refresh_failed", "inventory", - "inventory_extra", "plan_license", "publisher_set", - "unsupported_repo_op", "cert_info", "info_not_found", - "info_no_licenses"]}, - "reason": {"type": "string"}, - "info": {"type": "string"} - } - } - return error_schema + err = "\nThe following proxy configuration is set in the " "environment:\n" + if http_proxy: + err += "http_proxy: {0}\n".format(http_proxy) + if https_proxy: + err += "https_proxy: {0}\n".format(https_proxy) + if errors_json: + errors_json.append({"reason": err}) -def __construct_json_schema(title, description=None, stype="object", - properties=None, required=None, additional_prop=False): - """Construct json schema.""" - json_schema = {"$schema": "http://json-schema.org/draft-04/schema#", - "title": title, - "type": stype, - } - if description: - json_schema["description"] = description - if properties: - json_schema["properties"] = properties - if required: - json_schema["required"] = required - json_schema["additionalProperties"] = additional_prop - return json_schema +def _get_fmri_args(api_inst, pargs, cmd=None, errors_json=None): + """Convenience routine to check that input args are valid fmris.""" + + res = [] + errors = [] + for pat, err, pfmri, matcher in api_inst.parse_fmri_patterns(pargs): + if not err: + res.append((pat, err, pfmri, matcher)) + continue + if isinstance(err, version.VersionError): + # For version errors, include the pattern so + # that the user understands why it failed. + errors.append("Illegal FMRI '{0}': {1}".format(pat, err)) + else: + # Including the pattern is redundant for other + # exceptions. 
+ errors.append(err) + if errors: + _error_json( + "\n".join(str(e) for e in errors), cmd=cmd, errors_json=errors_json + ) + return len(errors) == 0, res -def __prepare_json(status, op=None, schema=None, data=None, errors=None): - """Prepare json structure for returning.""" - ret_json = {"status": status} +def __default_error_json_schema(): + """Get the default error json schema.""" + + error_schema = { + "type": "object", + "properties": { + "errtype": { + "type": "string", + "enum": [ + "format_update", + "catalog_refresh", + "catalog_refresh_failed", + "inventory", + "inventory_extra", + "plan_license", + "publisher_set", + "unsupported_repo_op", + "cert_info", + "info_not_found", + "info_no_licenses", + ], + }, + "reason": {"type": "string"}, + "info": {"type": "string"}, + }, + } + return error_schema + + +def __construct_json_schema( + title, + description=None, + stype="object", + properties=None, + required=None, + additional_prop=False, +): + """Construct json schema.""" + + json_schema = { + "$schema": "http://json-schema.org/draft-04/schema#", + "title": title, + "type": stype, + } + if description: + json_schema["description"] = description + if properties: + json_schema["properties"] = properties + if required: + json_schema["required"] = required + json_schema["additionalProperties"] = additional_prop + return json_schema - if errors: - if not isinstance(errors, list): - ret_json["errors"] = [errors] - else: - ret_json["errors"] = errors - if data: - ret_json["data"] = data - if op: - op_schema = _get_pkg_output_schema(op) - try: - json.validate(ret_json, op_schema) - except json.ValidationError as e: - newret_json = {"status": EXIT_OOPS, - "errors": [{"reason": str(e)}]} - return newret_json - if schema: - ret_json["schema"] = schema - return ret_json +def __prepare_json(status, op=None, schema=None, data=None, errors=None): + """Prepare json structure for returning.""" -def _collect_catalog_failures(cre, ignore_perms_failure=False, errors=None): - total = cre.total - succeeded = cre.succeeded - partial = 0 - refresh_errstr = "" - - for pub, err in cre.failed: - if isinstance(err, api_errors.CatalogOriginRefreshException): - if len(err.failed) < err.total: - partial += 1 - - refresh_errstr += _("\n{0}/{1} repositories for " - "publisher '{2}' could not be refreshed.\n").format( - len(err.failed), err.total, pub) - for o, e in err.failed: - refresh_errstr += "\n" - refresh_errstr += str(e) - refresh_errstr += "\n" - else: - refresh_errstr += "\n\n" + str(err) + ret_json = {"status": status} + if errors: + if not isinstance(errors, list): + ret_json["errors"] = [errors] + else: + ret_json["errors"] = errors + if data: + ret_json["data"] = data + if op: + op_schema = _get_pkg_output_schema(op) + try: + json.validate(ret_json, op_schema) + except json.ValidationError as e: + newret_json = {"status": EXIT_OOPS, "errors": [{"reason": str(e)}]} + return newret_json + if schema: + ret_json["schema"] = schema - partial_str = ":" - if partial: - partial_str = _(" ({0} partial):").format(str(partial)) + return ret_json - txt = _("pkg: {succeeded}/{total} catalogs successfully " - "updated{partial}").format(succeeded=succeeded, total=total, - partial=partial_str) - if errors is not None: - if cre.failed: - error = {"reason": txt, "errtype": "catalog_refresh"} - else: - error = {"info": txt, "errtype": "catalog_refresh"} - errors.append(error) - - for pub, err in cre.failed: - if ignore_perms_failure and \ - not isinstance(err, api_errors.PermissionsException): - # If any errors other 
than a permissions exception are - # found, then don't ignore them. - ignore_perms_failure = False - break +def _collect_catalog_failures(cre, ignore_perms_failure=False, errors=None): + total = cre.total + succeeded = cre.succeeded + partial = 0 + refresh_errstr = "" + + for pub, err in cre.failed: + if isinstance(err, api_errors.CatalogOriginRefreshException): + if len(err.failed) < err.total: + partial += 1 + + refresh_errstr += _( + "\n{0}/{1} repositories for " + "publisher '{2}' could not be refreshed.\n" + ).format(len(err.failed), err.total, pub) + for o, e in err.failed: + refresh_errstr += "\n" + refresh_errstr += str(e) + refresh_errstr += "\n" + else: + refresh_errstr += "\n\n" + str(err) - if cre.failed and ignore_perms_failure: - # Consider those that failed to have succeeded and add them - # to the actual successful total. - return succeeded + partial + len(cre.failed) + partial_str = ":" + if partial: + partial_str = _(" ({0} partial):").format(str(partial)) + txt = _( + "pkg: {succeeded}/{total} catalogs successfully " "updated{partial}" + ).format(succeeded=succeeded, total=total, partial=partial_str) + if errors is not None: + if cre.failed: + error = {"reason": txt, "errtype": "catalog_refresh"} + else: + error = {"info": txt, "errtype": "catalog_refresh"} + errors.append(error) + + for pub, err in cre.failed: + if ignore_perms_failure and not isinstance( + err, api_errors.PermissionsException + ): + # If any errors other than a permissions exception are + # found, then don't ignore them. + ignore_perms_failure = False + break + + if cre.failed and ignore_perms_failure: + # Consider those that failed to have succeeded and add them + # to the actual successful total. + return succeeded + partial + len(cre.failed) + + if errors is not None: + error = {"reason": str(refresh_errstr), "errtype": "catalog_refresh"} + errors.append(error) + + if cre.errmessage: if errors is not None: - error = {"reason": str(refresh_errstr), - "errtype": "catalog_refresh"} - errors.append(error) - - if cre.errmessage: - if errors is not None: - error = {"reason": str(cre.errmessage), - "errtype": "catalog_refresh"} - errors.append(error) - - return succeeded + partial - -def _list_inventory(op, api_inst, pargs, - li_parent_sync, list_all, list_installed_newest, list_newest, - list_upgradable, origins, quiet, refresh_catalogs, **other_opts): - """List packages.""" - - api_inst.progresstracker.set_purpose( - api_inst.progresstracker.PURPOSE_LISTING) - - variants = False - pkg_list = api.ImageInterface.LIST_INSTALLED - if list_all: - variants = True - pkg_list = api.ImageInterface.LIST_ALL - elif list_installed_newest: - pkg_list = api.ImageInterface.LIST_INSTALLED_NEWEST - elif list_newest: - pkg_list = api.ImageInterface.LIST_NEWEST - elif list_upgradable: - pkg_list = api.ImageInterface.LIST_UPGRADABLE - elif 'list_removable' in other_opts and other_opts['list_removable']: - pkg_list = api.ImageInterface.LIST_REMOVABLE - - # Each pattern in pats can be a partial or full FMRI, so - # extract the individual components. These patterns are - # transformed here so that partial failure can be detected - # when more than one pattern is provided. 
- errors_json = [] - rval, res = _get_fmri_args(api_inst, pargs, cmd=op, - errors_json=errors_json) - if not rval: + error = { + "reason": str(cre.errmessage), + "errtype": "catalog_refresh", + } + errors.append(error) + + return succeeded + partial + + +def _list_inventory( + op, + api_inst, + pargs, + li_parent_sync, + list_all, + list_installed_newest, + list_newest, + list_upgradable, + origins, + quiet, + refresh_catalogs, + **other_opts, +): + """List packages.""" + + api_inst.progresstracker.set_purpose( + api_inst.progresstracker.PURPOSE_LISTING + ) + + variants = False + pkg_list = api.ImageInterface.LIST_INSTALLED + if list_all: + variants = True + pkg_list = api.ImageInterface.LIST_ALL + elif list_installed_newest: + pkg_list = api.ImageInterface.LIST_INSTALLED_NEWEST + elif list_newest: + pkg_list = api.ImageInterface.LIST_NEWEST + elif list_upgradable: + pkg_list = api.ImageInterface.LIST_UPGRADABLE + elif "list_removable" in other_opts and other_opts["list_removable"]: + pkg_list = api.ImageInterface.LIST_REMOVABLE + + # Each pattern in pats can be a partial or full FMRI, so + # extract the individual components. These patterns are + # transformed here so that partial failure can be detected + # when more than one pattern is provided. + errors_json = [] + rval, res = _get_fmri_args(api_inst, pargs, cmd=op, errors_json=errors_json) + if not rval: + return __prepare_json(EXIT_OOPS, errors=errors_json) + + api_inst.log_operation_start(op) + if ( + pkg_list not in [api_inst.LIST_INSTALLED, api_inst.LIST_REMOVABLE] + and refresh_catalogs + ): + # If the user requested packages other than those + # installed, ensure that a refresh is performed if + # needed since the catalog may be out of date or + # invalid as a result of publisher information + # changing (such as an origin uri, etc.). + try: + api_inst.refresh(ignore_unreachable=False) + except api_errors.PermissionsException: + # Ignore permission exceptions with the + # assumption that an unprivileged user is + # executing this command and that the + # refresh doesn't matter. + pass + except api_errors.CatalogRefreshException as e: + succeeded = _collect_catalog_failures( + e, ignore_perms_failure=True, errors=errors_json + ) + if succeeded != e.total: + # If total number of publishers does + # not match 'successful' number + # refreshed, abort. return __prepare_json(EXIT_OOPS, errors=errors_json) - api_inst.log_operation_start(op) - if (pkg_list not in [api_inst.LIST_INSTALLED, api_inst.LIST_REMOVABLE] - and refresh_catalogs): - # If the user requested packages other than those - # installed, ensure that a refresh is performed if - # needed since the catalog may be out of date or - # invalid as a result of publisher information - # changing (such as an origin uri, etc.). - try: - api_inst.refresh(ignore_unreachable=False) - except api_errors.PermissionsException: - # Ignore permission exceptions with the - # assumption that an unprivileged user is - # executing this command and that the - # refresh doesn't matter. - pass - except api_errors.CatalogRefreshException as e: - succeeded = _collect_catalog_failures(e, - ignore_perms_failure=True, errors=errors_json) - if succeeded != e.total: - # If total number of publishers does - # not match 'successful' number - # refreshed, abort. - return __prepare_json(EXIT_OOPS, - errors=errors_json) - - except: - # Ignore the above error and just use what - # already exists. 
- pass - - state_map = [ - [(api.PackageInfo.INSTALLED, "installed")], - [(api.PackageInfo.FROZEN, "frozen")], - [(api.PackageInfo.OPTIONAL, "optional")], - [(api.PackageInfo.MANUAL, "manual")], - [ - (api.PackageInfo.OBSOLETE, "obsolete"), - (api.PackageInfo.LEGACY, "legacy"), - (api.PackageInfo.RENAMED, "renamed") - ], - ] + except: + # Ignore the above error and just use what + # already exists. + pass + + state_map = [ + [(api.PackageInfo.INSTALLED, "installed")], + [(api.PackageInfo.FROZEN, "frozen")], + [(api.PackageInfo.OPTIONAL, "optional")], + [(api.PackageInfo.MANUAL, "manual")], + [ + (api.PackageInfo.OBSOLETE, "obsolete"), + (api.PackageInfo.LEGACY, "legacy"), + (api.PackageInfo.RENAMED, "renamed"), + ], + ] + + # Now get the matching list of packages and display it. + found = False + + data = [] + try: + res = api_inst.get_pkg_list( + pkg_list, + patterns=pargs, + raise_unmatched=True, + repos=origins, + variants=variants, + ) + for pt, summ, cats, states, attrs in res: + found = True + entry = {} + pub, stem, ver = pt + entry["pub"] = pub + entry["pkg"] = stem + entry["version"] = ver + entry["summary"] = summ + + stateslist = [] + for sentry in state_map: + for s, v in sentry: + if s in states: + stateslist.append(v) + break + entry["states"] = stateslist + data.append(entry) + if not found and not pargs: + if pkg_list == api_inst.LIST_INSTALLED: + if not quiet: + err = {"reason": _("no packages installed")} + errors_json.append(err) + api_inst.log_operation_end(result=RESULT_NOTHING_TO_DO) + elif pkg_list == api_inst.LIST_INSTALLED_NEWEST: + if not quiet: + err = { + "reason": _( + "no packages " + "installed or available for " + "installation" + ) + } + errors_json.append(err) + api_inst.log_operation_end(result=RESULT_NOTHING_TO_DO) + elif pkg_list == api_inst.LIST_UPGRADABLE: + if not quiet: + img = api_inst._img + cat = img.get_catalog(img.IMG_CATALOG_INSTALLED) + if cat.package_count > 0: + err = { + "reason": _( + "no packages have " + "newer versions " + "available" + ) + } + else: + err = {"reason": _("no packages are " "installed")} + errors_json.append(err) + api_inst.log_operation_end(result=RESULT_NOTHING_TO_DO) + elif pkg_list == api_inst.LIST_REMOVABLE: + if not quiet: + err = { + "reason": _("no installed packages " "are removable") + } + errors_json.append(err) + api_inst.log_operation_end(result=RESULT_NOTHING_TO_DO) + else: + api_inst.log_operation_end(result=RESULT_NOTHING_TO_DO) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + api_inst.log_operation_end() + return __prepare_json(EXIT_OK, data=data, errors=errors_json) + except ( + api_errors.InvalidPackageErrors, + api_errors.ActionExecutionError, + api_errors.PermissionsException, + ) as e: + _error_json(e, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, data=data, errors=errors_json) + except api_errors.CatalogRefreshException as e: + _collect_catalog_failures(e, errors=errors_json) + return __prepare_json(EXIT_OOPS, data=data, errors=errors_json) + except api_errors.InventoryException as e: + if e.illegal: + for i in e.illegal: + _error_json(i, errors_json=errors_json) + api_inst.log_operation_end(result=RESULT_FAILED_BAD_REQUEST) + return __prepare_json(EXIT_OOPS, data=data, errors=errors_json) + + if quiet: + # Collect nothing. 
+ pass + elif ( + pkg_list == api.ImageInterface.LIST_ALL + or pkg_list == api.ImageInterface.LIST_NEWEST + ): + _error_json( + _("no known packages matching:\n {0}").format( + "\n ".join(e.notfound) + ), + cmd=op, + errors_json=errors_json, + errorType="inventory", + ) + elif pkg_list == api.ImageInterface.LIST_INSTALLED_NEWEST: + _error_json( + _( + "no packages matching the following " + "patterns are allowed by installed " + "incorporations, or image variants that are known " + "or installed\n {0}" + ).format("\n ".join(e.notfound)), + cmd=op, + errors_json=errors_json, + errorType="inventory_extra", + ) + elif pkg_list == api.ImageInterface.LIST_UPGRADABLE: + # Creating a list of packages that are uptodate + # and that are not installed on the system. + no_updates = [] + not_installed = [] + try: + for entry in api_inst.get_pkg_list( + api.ImageInterface.LIST_INSTALLED, + patterns=e.notfound, + raise_unmatched=True, + ): + pub, stem, ver = entry[0] + no_updates.append(stem) + except api_errors.InventoryException as exc: + not_installed = exc.notfound + + err_str = "" + if not_installed: + err_str = _( + "no packages matching the " + "following patterns are installed:\n {0}" + ).format("\n ".join(not_installed)) + + if no_updates: + err_str = err_str + _( + "no updates are " + "available for the following packages:\n " + "{0}" + ).format("\n ".join(no_updates)) + if err_str: + _error_json( + err_str, + cmd=op, + errors_json=errors_json, + errorType="inventory", + ) + else: + _error_json( + _( + "no packages matching the following " + "patterns are installed:\n {0}" + ).format("\n ".join(e.notfound)), + cmd=op, + errors_json=errors_json, + errorType="inventory", + ) + + if found and e.notfound: + # Only some patterns matched. + api_inst.log_operation_end() + return __prepare_json(EXIT_PARTIAL, data=data, errors=errors_json) + api_inst.log_operation_end(result=RESULT_NOTHING_TO_DO) + return __prepare_json(EXIT_OOPS, data=data, errors=errors_json) - # Now get the matching list of packages and display it. - found = False - data = [] +def _get_tracker(prog_delay=PROG_DELAY, prog_tracker=None): + if prog_tracker: + return prog_tracker + elif global_settings.client_output_parsable_version is not None: + progresstracker = progress.NullProgressTracker() + elif global_settings.client_output_quiet: + progresstracker = progress.QuietProgressTracker() + elif global_settings.client_output_progfd: + # This logic handles linked images: for linked children + # we elide the progress output. 
+ output_file = os.fdopen(global_settings.client_output_progfd, "w") + child_tracker = progress.LinkedChildProgressTracker( + output_file=output_file + ) + dot_tracker = progress.DotProgressTracker( + term_delay=prog_delay, output_file=output_file + ) + progresstracker = progress.MultiProgressTracker( + [child_tracker, dot_tracker] + ) + else: try: - res = api_inst.get_pkg_list(pkg_list, patterns=pargs, - raise_unmatched=True, repos=origins, variants=variants) - for pt, summ, cats, states, attrs in res: - found = True - entry = {} - pub, stem, ver = pt - entry["pub"] = pub - entry["pkg"] = stem - entry["version"] = ver - entry["summary"] = summ - - stateslist = [] - for sentry in state_map: - for s, v in sentry: - if s in states: - stateslist.append(v) - break - entry["states"] = stateslist - data.append(entry) - if not found and not pargs: - if pkg_list == api_inst.LIST_INSTALLED: - if not quiet: - err = {"reason": - _("no packages installed")} - errors_json.append(err) - api_inst.log_operation_end( - result=RESULT_NOTHING_TO_DO) - elif pkg_list == api_inst.LIST_INSTALLED_NEWEST: - if not quiet: - err = {"reason": _("no packages " - "installed or available for " - "installation")} - errors_json.append(err) - api_inst.log_operation_end( - result=RESULT_NOTHING_TO_DO) - elif pkg_list == api_inst.LIST_UPGRADABLE: - if not quiet: - img = api_inst._img - cat = img.get_catalog( - img.IMG_CATALOG_INSTALLED) - if cat.package_count > 0: - err = {"reason": - _("no packages have " - "newer versions " - "available")} - else: - err = {"reason": - _("no packages are " - "installed")} - errors_json.append(err) - api_inst.log_operation_end( - result=RESULT_NOTHING_TO_DO) - elif pkg_list == api_inst.LIST_REMOVABLE: - if not quiet: - err = {"reason": - _("no installed packages " - "are removable")} - errors_json.append(err) - api_inst.log_operation_end( - result=RESULT_NOTHING_TO_DO) - else: - api_inst.log_operation_end( - result=RESULT_NOTHING_TO_DO) - return __prepare_json(EXIT_OOPS, - errors=errors_json) - - api_inst.log_operation_end() - return __prepare_json(EXIT_OK, data=data, - errors=errors_json) - except (api_errors.InvalidPackageErrors, - api_errors.ActionExecutionError, - api_errors.PermissionsException) as e: - _error_json(e, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, data=data, - errors=errors_json) - except api_errors.CatalogRefreshException as e: - _collect_catalog_failures(e, errors=errors_json) - return __prepare_json(EXIT_OOPS, data=data, - errors=errors_json) - except api_errors.InventoryException as e: - if e.illegal: - for i in e.illegal: - _error_json(i, errors_json=errors_json) - api_inst.log_operation_end( - result=RESULT_FAILED_BAD_REQUEST) - return __prepare_json(EXIT_OOPS, data=data, - errors=errors_json) - - if quiet: - # Collect nothing. - pass - elif pkg_list == api.ImageInterface.LIST_ALL or \ - pkg_list == api.ImageInterface.LIST_NEWEST: - _error_json(_("no known packages matching:\n {0}" - ).format("\n ".join(e.notfound)), cmd=op, - errors_json=errors_json, - errorType="inventory") - elif pkg_list == api.ImageInterface.LIST_INSTALLED_NEWEST: - _error_json(_("no packages matching the following " - "patterns are allowed by installed " - "incorporations, or image variants that are known " - "or installed\n {0}").format( - "\n ".join(e.notfound)), cmd=op, - errors_json=errors_json, - errorType="inventory_extra") - elif pkg_list == api.ImageInterface.LIST_UPGRADABLE: - # Creating a list of packages that are uptodate - # and that are not installed on the system. 
- no_updates = [] - not_installed = [] - try: - for entry in api_inst.get_pkg_list( - api.ImageInterface.LIST_INSTALLED, - patterns=e.notfound, raise_unmatched=True): - pub, stem, ver = entry[0] - no_updates.append(stem) - except api_errors.InventoryException as exc: - not_installed = exc.notfound - - err_str = "" - if not_installed: - err_str = _("no packages matching the " - "following patterns are installed:\n {0}" - ).format("\n ".join(not_installed)) - - if no_updates: - err_str = err_str + _("no updates are " - "available for the following packages:\n " - "{0}").format("\n ".join(no_updates)) - if err_str: - _error_json(err_str, cmd=op, - errors_json=errors_json, - errorType="inventory") - else: - _error_json(_("no packages matching the following " - "patterns are installed:\n {0}").format( - "\n ".join(e.notfound)), cmd=op, - errors_json=errors_json, - errorType="inventory") - - if found and e.notfound: - # Only some patterns matched. - api_inst.log_operation_end() - return __prepare_json(EXIT_PARTIAL, data=data, - errors=errors_json) - api_inst.log_operation_end(result=RESULT_NOTHING_TO_DO) - return __prepare_json(EXIT_OOPS, data=data, errors=errors_json) + progresstracker = progress.FancyUNIXProgressTracker( + term_delay=prog_delay + ) + except progress.ProgressTrackerException: + progresstracker = progress.CommandLineProgressTracker( + term_delay=prog_delay + ) + return progresstracker -def _get_tracker(prog_delay=PROG_DELAY, prog_tracker=None): - if prog_tracker: - return prog_tracker - elif global_settings.client_output_parsable_version is not None: - progresstracker = progress.NullProgressTracker() - elif global_settings.client_output_quiet: - progresstracker = progress.QuietProgressTracker() - elif global_settings.client_output_progfd: - # This logic handles linked images: for linked children - # we elide the progress output. - output_file = os.fdopen(global_settings.client_output_progfd, - "w") - child_tracker = progress.LinkedChildProgressTracker( - output_file=output_file) - dot_tracker = progress.DotProgressTracker( - term_delay=prog_delay, output_file=output_file) - progresstracker = progress.MultiProgressTracker( - [child_tracker, dot_tracker]) - else: - try: - progresstracker = progress.FancyUNIXProgressTracker( - term_delay=prog_delay) - except progress.ProgressTrackerException: - progresstracker = progress.CommandLineProgressTracker( - term_delay=prog_delay) - return progresstracker def _accept_plan_licenses(api_inst): - """Helper function that marks all licenses for the current plan as - accepted if they require acceptance.""" - - plan = api_inst.describe() - for pfmri, src, dest, accepted, displayed in plan.get_licenses(): - if not dest.must_accept: - continue - api_inst.set_plan_license_status(pfmri, dest.license, - accepted=True) - -display_plan_options = ["basic", "fmris", "variants/facets", "services", - "actions", "boot-archive"] - -def __api_alloc(pkg_image, orig_cwd, prog_delay=PROG_DELAY, prog_tracker=None, - errors_json=None): - """Allocate API instance.""" - - provided_image_dir = True - pkg_image_used = False - - if pkg_image: - imgdir = pkg_image - - if "imgdir" not in locals(): - imgdir, provided_image_dir = api.get_default_image_root( - orig_cwd=orig_cwd) - if os.environ.get("PKG_IMAGE"): - # It's assumed that this has been checked by the above - # function call and hasn't been removed from the - # environment. - pkg_image_used = True - - if not imgdir: - if errors_json: - err = {"reason": "Could not find image. 
Set the " - "pkg_image property to the\nlocation of an image."} - errors_json.append(err) - return - - progresstracker = _get_tracker(prog_delay=prog_delay, - prog_tracker=prog_tracker) - try: - return api.ImageInterface(imgdir, CLIENT_API_VERSION, - progresstracker, None, PKG_CLIENT_NAME, - exact_match=provided_image_dir) - except api_errors.ImageNotFoundException as e: - if e.user_specified: - if pkg_image_used: - _error_json(_("No image rooted at '{0}' " - "(set by $PKG_IMAGE)").format(e.user_dir), - errors_json=errors_json) - else: - _error_json(_("No image rooted at '{0}'") - .format(e.user_dir), errors_json=errors_json) - else: - _error_json(_("No image found."), - errors_json=errors_json) - return - except api_errors.PermissionsException as e: - _error_json(e, errors_json=errors_json) - return - except api_errors.ImageFormatUpdateNeeded as e: - _format_update_error(e, errors_json=errors_json) - return + """Helper function that marks all licenses for the current plan as + accepted if they require acceptance.""" + + plan = api_inst.describe() + for pfmri, src, dest, accepted, displayed in plan.get_licenses(): + if not dest.must_accept: + continue + api_inst.set_plan_license_status(pfmri, dest.license, accepted=True) + + +display_plan_options = [ + "basic", + "fmris", + "variants/facets", + "services", + "actions", + "boot-archive", +] + + +def __api_alloc( + pkg_image, + orig_cwd, + prog_delay=PROG_DELAY, + prog_tracker=None, + errors_json=None, +): + """Allocate API instance.""" + + provided_image_dir = True + pkg_image_used = False + + if pkg_image: + imgdir = pkg_image + + if "imgdir" not in locals(): + imgdir, provided_image_dir = api.get_default_image_root( + orig_cwd=orig_cwd + ) + if os.environ.get("PKG_IMAGE"): + # It's assumed that this has been checked by the above + # function call and hasn't been removed from the + # environment. + pkg_image_used = True + + if not imgdir: + if errors_json: + err = { + "reason": "Could not find image. Set the " + "pkg_image property to the\nlocation of an image." + } + errors_json.append(err) + return + + progresstracker = _get_tracker( + prog_delay=prog_delay, prog_tracker=prog_tracker + ) + try: + return api.ImageInterface( + imgdir, + CLIENT_API_VERSION, + progresstracker, + None, + PKG_CLIENT_NAME, + exact_match=provided_image_dir, + ) + except api_errors.ImageNotFoundException as e: + if e.user_specified: + if pkg_image_used: + _error_json( + _("No image rooted at '{0}' " "(set by $PKG_IMAGE)").format( + e.user_dir + ), + errors_json=errors_json, + ) + else: + _error_json( + _("No image rooted at '{0}'").format(e.user_dir), + errors_json=errors_json, + ) + else: + _error_json(_("No image found."), errors_json=errors_json) + return + except api_errors.PermissionsException as e: + _error_json(e, errors_json=errors_json) + return + except api_errors.ImageFormatUpdateNeeded as e: + _format_update_error(e, errors_json=errors_json) + return + def __api_prepare_plan(operation, api_inst): - # Exceptions which happen here are printed in the above level, with - # or without some extra decoration done here. - # XXX would be nice to kick the progress tracker. - errors_json =[] - try: - api_inst.prepare() - except (api_errors.PermissionsException, api_errors.UnknownErrors) as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. 
- _error_json("\n" + str(e), errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.TransportError as e: - raise e - except api_errors.PlanLicenseErrors as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - _error_json(_("\nThe following packages require their " - "licenses to be accepted before they can be installed " - "or updated:\n {0}").format(str(e)), - errors_json=errors_json, errorType="plan_license") - return __prepare_json(EXIT_LICENSE, errors=errors_json) - except api_errors.InvalidPlanError as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - _error_json("\n" + str(e), errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.ImageFormatUpdateNeeded as e: - _format_update_error(e, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.ImageInsufficentSpace as e: - _error_json(str(e), errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.LinkedImageException as e: - _error_json(str(e), errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except KeyboardInterrupt: - raise - except Exception as e: - _error_json(_("\nAn unexpected error happened while preparing " - "for {op}: {err}").format(op=operation, err=str(e))) - return __prepare_json(EXIT_OOPS, errors=errors_json) - return __prepare_json(EXIT_OK) + # Exceptions which happen here are printed in the above level, with + # or without some extra decoration done here. + # XXX would be nice to kick the progress tracker. + errors_json = [] + try: + api_inst.prepare() + except (api_errors.PermissionsException, api_errors.UnknownErrors) as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + _error_json("\n" + str(e), errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.TransportError as e: + raise e + except api_errors.PlanLicenseErrors as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + _error_json( + _( + "\nThe following packages require their " + "licenses to be accepted before they can be installed " + "or updated:\n {0}" + ).format(str(e)), + errors_json=errors_json, + errorType="plan_license", + ) + return __prepare_json(EXIT_LICENSE, errors=errors_json) + except api_errors.InvalidPlanError as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. 
+ _error_json("\n" + str(e), errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.ImageFormatUpdateNeeded as e: + _format_update_error(e, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.ImageInsufficentSpace as e: + _error_json(str(e), errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.LinkedImageException as e: + _error_json(str(e), errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except KeyboardInterrupt: + raise + except Exception as e: + _error_json( + _( + "\nAn unexpected error happened while preparing " + "for {op}: {err}" + ).format(op=operation, err=str(e)) + ) + return __prepare_json(EXIT_OOPS, errors=errors_json) + return __prepare_json(EXIT_OK) + def __api_execute_plan(operation, api_inst): - rval = None - errors_json = [] + rval = None + errors_json = [] + try: + api_inst.execute_plan() + pd = api_inst.describe() + if pd.actuator_timed_out: + rval = __prepare_json(EXIT_ACTUATOR) + else: + rval = __prepare_json(EXIT_OK) + except RuntimeError as e: + _error_json( + _("{operation} failed: {err}").format(operation=operation, err=e), + errors_json=errors_json, + ) + rval = __prepare_json(EXIT_OOPS, errors=errors_json) + except ( + api_errors.InvalidPlanError, + api_errors.ActionExecutionError, + api_errors.InvalidPackageErrors, + api_errors.PlanExclusionError, + ) as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + _error_json("\n" + str(e), errors_json=errors_json) + rval = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.LinkedImageException as e: + _error_json( + _( + "{operation} failed (linked image exception(s))" ":\n{err}" + ).format(operation=operation, err=e), + errors_json=errors_json, + ) + rval = __prepare_json(e.lix_exitrv, errors=errors_json) + except api_errors.ImageUpdateOnLiveImageException: + _error_json( + _("{0} cannot be done on live image").format(operation), + errors_json=errors_json, + ) + rval = __prepare_json(EXIT_NOTLIVE, errors=errors_json) + except api_errors.RebootNeededOnLiveImageException: + _error_json( + _( + 'Requested "{0}" operation would affect files ' + "that cannot be modified in live image.\n" + "Please retry this operation on an alternate boot " + "environment." + ).format(operation), + errors_json=errors_json, + ) + rval = __prepare_json(EXIT_NOTLIVE, errors=errors_json) + except api_errors.CorruptedIndexException as e: + _error_json( + "The search index appears corrupted. Please " + "rebuild the index with 'pkg rebuild-index'.", + errors_json=errors_json, + ) + rval = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.ProblematicPermissionsIndexException as e: + _error_json(str(e), errors_json=errors_json) + _error_json( + _( + "\n(Failure to consistently execute pkg commands " + "as a privileged user is often a source of this problem.)" + ), + errors_json=errors_json, + ) + rval = __prepare_json(EXIT_OOPS, errors=errors_json) + except (api_errors.PermissionsException, api_errors.UnknownErrors) as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. 
+ _error_json("\n" + str(e), errors_json=errors_json) + rval = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.ImageFormatUpdateNeeded as e: + _format_update_error(e, errors_json=errors_json) + rval = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.BEException as e: + _error_json(e, errors_json=errors_json) + rval = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.WrapSuccessfulIndexingException: + raise + except api_errors.ImageInsufficentSpace as e: + _error_json(str(e), errors_json=errors_json) + rval = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.InvalidMediatorTarget as e: + _error_json(str(e), errors_json=errors_json) + # An invalid target means the operation completed but + # the user needs to consider the state of any image so + # return a EXIT_PARTIAL. + rval = __prepare_json(EXIT_PARTIAL, errors=errors_json) + except Exception as e: + _error_json( + _( + "An unexpected error happened during " "{operation}: {err}" + ).format(operation=operation, err=e), + errors_json=errors_json, + ) + rval = __prepare_json(EXIT_OOPS, errors=errors_json) + finally: + exc_type = exc_value = exc_tb = None + if rval is None: + # Store original exception so that the real cause of + # failure can be raised if this fails. + exc_type, exc_value, exc_tb = sys.exc_info() + try: - api_inst.execute_plan() - pd = api_inst.describe() - if pd.actuator_timed_out: - rval = __prepare_json(EXIT_ACTUATOR) - else: - rval = __prepare_json(EXIT_OK) - except RuntimeError as e: - _error_json(_("{operation} failed: {err}").format( - operation=operation, err=e), errors_json=errors_json) - rval = __prepare_json(EXIT_OOPS, errors=errors_json) - except (api_errors.InvalidPlanError, - api_errors.ActionExecutionError, - api_errors.InvalidPackageErrors, - api_errors.PlanExclusionError) as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - _error_json("\n" + str(e), errors_json=errors_json) - rval = __prepare_json(EXIT_OOPS, errors=errors_json) - except (api_errors.LinkedImageException) as e: - _error_json(_("{operation} failed (linked image exception(s))" - ":\n{err}").format(operation=operation, err=e), - errors_json=errors_json) - rval = __prepare_json(e.lix_exitrv, errors=errors_json) - except api_errors.ImageUpdateOnLiveImageException: - _error_json(_("{0} cannot be done on live image").format( - operation), errors_json=errors_json) - rval = __prepare_json(EXIT_NOTLIVE, errors=errors_json) - except api_errors.RebootNeededOnLiveImageException: - _error_json(_("Requested \"{0}\" operation would affect files " - "that cannot be modified in live image.\n" - "Please retry this operation on an alternate boot " - "environment.").format(operation), errors_json=errors_json) - rval = __prepare_json(EXIT_NOTLIVE, errors=errors_json) - except api_errors.CorruptedIndexException as e: - _error_json("The search index appears corrupted. 
Please " - "rebuild the index with 'pkg rebuild-index'.", - errors_json=errors_json) - rval = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.ProblematicPermissionsIndexException as e: - _error_json(str(e), errors_json=errors_json) - _error_json(_("\n(Failure to consistently execute pkg commands " - "as a privileged user is often a source of this problem.)"), - errors_json=errors_json) - rval = __prepare_json(EXIT_OOPS, errors=errors_json) - except (api_errors.PermissionsException, api_errors.UnknownErrors) as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - _error_json("\n" + str(e), errors_json=errors_json) - rval = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.ImageFormatUpdateNeeded as e: - _format_update_error(e, errors_json=errors_json) - rval = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.BEException as e: - _error_json(e, errors_json=errors_json) - rval = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.WrapSuccessfulIndexingException: + salvaged = api_inst.describe().salvaged + newbe = api_inst.describe().new_be + stat = None + if rval: + stat = rval["status"] + if salvaged and (stat == EXIT_OK or not newbe): + # Only show salvaged file list if populated + # and operation was successful, or if operation + # failed and a new BE was not created for + # the operation. + err = _( + "\nThe following " + "unexpected or editable files and " + "directories were\n" + "salvaged while executing the requested " + "package operation; they\nhave been moved " + "to the displayed location in the image:\n" + ) + for opath, spath in salvaged: + err += " {0} -> {1}\n".format(opath, spath) + errors_json.append({"info": err}) + except Exception: + if rval is not None: + # Only raise exception encountered here if the + # exception previously raised was suppressed. raise - except api_errors.ImageInsufficentSpace as e: - _error_json(str(e), errors_json=errors_json) - rval = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.InvalidMediatorTarget as e: - _error_json(str(e), errors_json=errors_json) - # An invalid target means the operation completed but - # the user needs to consider the state of any image so - # return a EXIT_PARTIAL. - rval = __prepare_json(EXIT_PARTIAL, errors=errors_json) - except Exception as e: - _error_json(_("An unexpected error happened during " - "{operation}: {err}").format( - operation=operation, err=e), errors_json=errors_json) - rval = __prepare_json(EXIT_OOPS, errors=errors_json) - finally: - exc_type = exc_value = exc_tb = None - if rval is None: - # Store original exception so that the real cause of - # failure can be raised if this fails. - exc_type, exc_value, exc_tb = sys.exc_info() - - try: - salvaged = api_inst.describe().salvaged - newbe = api_inst.describe().new_be - stat = None - if rval: - stat = rval["status"] - if salvaged and (stat == EXIT_OK or not newbe): - # Only show salvaged file list if populated - # and operation was successful, or if operation - # failed and a new BE was not created for - # the operation. 
- err = _("\nThe following " - "unexpected or editable files and " - "directories were\n" - "salvaged while executing the requested " - "package operation; they\nhave been moved " - "to the displayed location in the image:\n") - for opath, spath in salvaged: - err += " {0} -> {1}\n".format(opath, - spath) - errors_json.append({"info": err}) - except Exception: - if rval is not None: - # Only raise exception encountered here if the - # exception previously raised was suppressed. - raise - - if exc_value or exc_tb: - if six.PY2: - six.reraise(exc_value, None, exc_tb) - else: - raise exc_value - - return rval - -def __api_plan_exception(op, noexecute, verbose, api_inst, errors_json=[], - display_plan_cb=None): - e_type, e, e_traceback = sys.exc_info() - - if e_type == api_errors.ImageNotFoundException: - _error_json(_("No image rooted at '{0}'").format(e.user_dir), - cmd=op, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - if e_type == api_errors.ImageLockingFailedError: - _error_json(_(e), cmd=op, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - if e_type == api_errors.InventoryException: - _error_json("\n" + _("{operation} failed (inventory exception):\n" - "{err}").format(operation=op, err=e), - errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - if isinstance(e, api_errors.LinkedImageException): - _error_json(_("{operation} failed (linked image exception(s)):\n" - "{err}").format(operation=op, err=e), - errors_json=errors_json) - return __prepare_json(e.lix_exitrv, errors=errors_json) - if e_type == api_errors.IpkgOutOfDateException: - error ={"info": _("""\ + + if exc_value or exc_tb: + if six.PY2: + six.reraise(exc_value, None, exc_tb) + else: + raise exc_value + + return rval + + +def __api_plan_exception( + op, noexecute, verbose, api_inst, errors_json=[], display_plan_cb=None +): + e_type, e, e_traceback = sys.exc_info() + + if e_type == api_errors.ImageNotFoundException: + _error_json( + _("No image rooted at '{0}'").format(e.user_dir), + cmd=op, + errors_json=errors_json, + ) + return __prepare_json(EXIT_OOPS, errors=errors_json) + if e_type == api_errors.ImageLockingFailedError: + _error_json(_(e), cmd=op, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + if e_type == api_errors.InventoryException: + _error_json( + "\n" + + _("{operation} failed (inventory exception):\n" "{err}").format( + operation=op, err=e + ), + errors_json=errors_json, + ) + return __prepare_json(EXIT_OOPS, errors=errors_json) + if isinstance(e, api_errors.LinkedImageException): + _error_json( + _( + "{operation} failed (linked image exception(s)):\n" "{err}" + ).format(operation=op, err=e), + errors_json=errors_json, + ) + return __prepare_json(e.lix_exitrv, errors=errors_json) + if e_type == api_errors.IpkgOutOfDateException: + error = { + "info": _( + """\ WARNING: pkg(7) appears to be out of date, and should be updated before running {op}. 
Please update pkg(7) by executing 'pkg install pkg:/package/pkg' as a privileged user and then retry the {op}.""" - ).format(**locals())} - errors_json.append(error) - return __prepare_json(EXIT_OOPS, errors=errors_json) - if e_type == api_errors.CatalogRefreshException: - _collect_catalog_failures(e, errors=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - if e_type == api_errors.ConflictingActionErrors or \ - e_type == api_errors.ImageBoundaryErrors: - if verbose and display_plan_cb: - display_plan_cb(api_inst, verbose=verbose, - noexecute=noexecute, plan_only=True) - _error_json("\n" + str(e), cmd=op, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - if e_type == api_errors.ImageFormatUpdateNeeded: - _format_update_error(e, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - - if e_type == api_errors.ImageUpdateOnLiveImageException: - _error_json("\n" + _("The proposed operation cannot be " - "performed on a live image."), cmd=op, - errors_json=errors_json) - return __prepare_json(EXIT_NOTLIVE, errors=errors_json) - - if issubclass(e_type, api_errors.BEException): - _error_json("\n" + str(e), cmd=op, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - - if e_type == api_errors.PlanCreationException: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - txt = str(e) - if e.multiple_matches: - txt += "\n\n" + _("Please provide one of the package " - "FMRIs listed above to the install command.") - _error_json("\n" + txt, cmd=op, errors_json=errors_json) - if verbose: - err_txt = "\n".join(e.verbose_info) - if err_txt: - errors_json.append({"reason": err_txt}) - if e.invalid_mediations: - # Bad user input for mediation. - return __prepare_json(EXIT_BADOPT, errors=errors_json) - if e.no_solution: - return __prepare_json(EXIT_CONSTRAINED, errors=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - - if isinstance(e, (api_errors.CertificateError, + ).format(**locals()) + } + errors_json.append(error) + return __prepare_json(EXIT_OOPS, errors=errors_json) + if e_type == api_errors.CatalogRefreshException: + _collect_catalog_failures(e, errors=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + if ( + e_type == api_errors.ConflictingActionErrors + or e_type == api_errors.ImageBoundaryErrors + ): + if verbose and display_plan_cb: + display_plan_cb( + api_inst, verbose=verbose, noexecute=noexecute, plan_only=True + ) + _error_json("\n" + str(e), cmd=op, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + if e_type == api_errors.ImageFormatUpdateNeeded: + _format_update_error(e, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + if e_type == api_errors.ImageUpdateOnLiveImageException: + _error_json( + "\n" + + _( + "The proposed operation cannot be " "performed on a live image." + ), + cmd=op, + errors_json=errors_json, + ) + return __prepare_json(EXIT_NOTLIVE, errors=errors_json) + + if issubclass(e_type, api_errors.BEException): + _error_json("\n" + str(e), cmd=op, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + if e_type == api_errors.PlanCreationException: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. 
+ txt = str(e) + if e.multiple_matches: + txt += "\n\n" + _( + "Please provide one of the package " + "FMRIs listed above to the install command." + ) + _error_json("\n" + txt, cmd=op, errors_json=errors_json) + if verbose: + err_txt = "\n".join(e.verbose_info) + if err_txt: + errors_json.append({"reason": err_txt}) + if e.invalid_mediations: + # Bad user input for mediation. + return __prepare_json(EXIT_BADOPT, errors=errors_json) + if e.no_solution: + return __prepare_json(EXIT_CONSTRAINED, errors=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + if isinstance( + e, + ( + api_errors.CertificateError, api_errors.UnknownErrors, api_errors.PermissionsException, api_errors.InvalidPropertyValue, @@ -1183,2202 +1379,2788 @@ def __api_plan_exception(op, noexecute, verbose, api_inst, errors_json=[], api_errors.ActionExecutionError, api_errors.InvalidPackageErrors, api_errors.ImageBoundaryErrors, - api_errors.InvalidVarcetNames)): - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - _error_json("\n" + str(e), cmd=op, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) + api_errors.InvalidVarcetNames, + ), + ): + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + _error_json("\n" + str(e), cmd=op, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + # if we didn't deal with the exception above, pass it on. + raise + # NOTREACHED + + +def __api_plan( + _op, + _api_inst, + _accept=False, + _li_ignore=None, + _noexecute=False, + _omit_headers=False, + _origins=None, + _parsable_version=None, + _quiet=False, + _quiet_plan=False, + _review_release_notes=False, + _show_licenses=False, + _stage=API_STAGE_DEFAULT, + _verbose=0, + display_plan_cb=None, + logger=None, + _unpackaged=False, + _unpackaged_only=False, + _verify_paths=EmptyI, + **kwargs, +): + # All the api interface functions that we invoke have some + # common arguments. Set those up now. 
+ if _op not in ( + PKG_OP_REVERT, + PKG_OP_FIX, + PKG_OP_VERIFY, + PKG_OP_DEHYDRATE, + PKG_OP_REHYDRATE, + ): + kwargs["li_ignore"] = _li_ignore + if _op == PKG_OP_VERIFY: + kwargs["unpackaged"] = _unpackaged + kwargs["unpackaged_only"] = _unpackaged_only + kwargs["verify_paths"] = _verify_paths + elif _op == PKG_OP_FIX: + kwargs["unpackaged"] = _unpackaged + + kwargs["noexecute"] = _noexecute + if _origins: + kwargs["repos"] = _origins + if _stage != API_STAGE_DEFAULT: + kwargs["pubcheck"] = False + + # display plan debugging information + if _verbose > 2: + DebugValues.set_value("plan", "True") + + # plan the requested operation + stuff_to_do = None + + if _op == PKG_OP_ATTACH: + api_plan_func = _api_inst.gen_plan_attach + elif _op in [PKG_OP_CHANGE_FACET, PKG_OP_CHANGE_VARIANT]: + api_plan_func = _api_inst.gen_plan_change_varcets + elif _op == PKG_OP_DEHYDRATE: + api_plan_func = _api_inst.gen_plan_dehydrate + elif _op == PKG_OP_DETACH: + api_plan_func = _api_inst.gen_plan_detach + elif _op == PKG_OP_EXACT_INSTALL: + api_plan_func = _api_inst.gen_plan_exact_install + elif _op == PKG_OP_FIX: + api_plan_func = _api_inst.gen_plan_fix + elif _op == PKG_OP_INSTALL: + api_plan_func = _api_inst.gen_plan_install + elif _op == PKG_OP_REHYDRATE: + api_plan_func = _api_inst.gen_plan_rehydrate + elif _op == PKG_OP_REVERT: + api_plan_func = _api_inst.gen_plan_revert + elif _op == PKG_OP_SYNC: + api_plan_func = _api_inst.gen_plan_sync + elif _op == PKG_OP_UNINSTALL: + api_plan_func = _api_inst.gen_plan_uninstall + elif _op == PKG_OP_UPDATE: + api_plan_func = _api_inst.gen_plan_update + elif _op == PKG_OP_VERIFY: + api_plan_func = _api_inst.gen_plan_verify + else: + raise RuntimeError("__api_plan() invalid op: {0}".format(_op)) + + errors_json = [] + planned_self = False + child_plans = [] + try: + for pd in api_plan_func(**kwargs): + if planned_self: + # we don't display anything for child images + # since they currently do their own display + # work (unless parsable output is requested). + child_plans.append(pd) + continue + + # the first plan description is always for ourself. + planned_self = True + pkg_timer.record("planning", logger=logger) + + # if we're in parsable mode don't display anything + # until after we finish planning for all children + if _parsable_version is None and display_plan_cb: + display_plan_cb( + _api_inst, + [], + _noexecute, + _omit_headers, + _op, + _parsable_version, + _quiet, + _quiet_plan, + _show_licenses, + _stage, + _verbose, + _unpackaged, + _unpackaged_only, + ) + + # if requested accept licenses for child images. we + # have to do this before recursing into children. + if _accept: + _accept_plan_licenses(_api_inst) + except: + ret = __api_plan_exception( + _op, + _noexecute, + _verbose, + _api_inst, + errors_json=errors_json, + display_plan_cb=display_plan_cb, + ) + if ret["status"] != EXIT_OK: + pkg_timer.record("planning", logger=logger) + return ret + + if not planned_self: + # if we got an exception we didn't do planning for children + pkg_timer.record("planning", logger=logger) + + elif _api_inst.isparent(_li_ignore): + # if we didn't get an exception and we're a parent image then + # we should have done planning for child images. + pkg_timer.record("planning children", logger=logger) + + # if we didn't display our own plan (due to an exception), or if we're + # in parsable mode, then display our plan now. 
+ parsable_plan = None + if not planned_self or _parsable_version is not None: + try: + if display_plan_cb: + display_plan_cb( + _api_inst, + child_plans, + _noexecute, + _omit_headers, + _op, + _parsable_version, + _quiet, + _quiet_plan, + _show_licenses, + _stage, + _verbose, + _unpackaged, + _unpackaged_only, + ) + else: + plan = _api_inst.describe() + parsable_plan = plan.get_parsable_plan( + _parsable_version, child_plans, api_inst=_api_inst + ) + # Convert to json. + parsable_plan = json.loads(json.dumps(parsable_plan)) + except api_errors.ApiException as e: + _error_json(e, cmd=_op, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) - # if we didn't deal with the exception above, pass it on. - raise - # NOTREACHED - -def __api_plan(_op, _api_inst, _accept=False, _li_ignore=None, _noexecute=False, - _omit_headers=False, _origins=None, _parsable_version=None, _quiet=False, - _quiet_plan=False, _review_release_notes=False, _show_licenses=False, - _stage=API_STAGE_DEFAULT, _verbose=0, display_plan_cb=None, logger=None, - _unpackaged=False, _unpackaged_only=False, _verify_paths=EmptyI, **kwargs): - - # All the api interface functions that we invoke have some - # common arguments. Set those up now. - if _op not in (PKG_OP_REVERT, PKG_OP_FIX, PKG_OP_VERIFY, - PKG_OP_DEHYDRATE, PKG_OP_REHYDRATE): - kwargs["li_ignore"] = _li_ignore - if _op == PKG_OP_VERIFY: - kwargs["unpackaged"] = _unpackaged - kwargs["unpackaged_only"] = _unpackaged_only - kwargs["verify_paths"] = _verify_paths - elif _op == PKG_OP_FIX: - kwargs["unpackaged"] = _unpackaged - - kwargs["noexecute"] = _noexecute - if _origins: - kwargs["repos"] = _origins - if _stage != API_STAGE_DEFAULT: - kwargs["pubcheck"] = False - - # display plan debugging information - if _verbose > 2: - DebugValues.set_value("plan", "True") - - # plan the requested operation - stuff_to_do = None - - if _op == PKG_OP_ATTACH: - api_plan_func = _api_inst.gen_plan_attach - elif _op in [PKG_OP_CHANGE_FACET, PKG_OP_CHANGE_VARIANT]: - api_plan_func = _api_inst.gen_plan_change_varcets - elif _op == PKG_OP_DEHYDRATE: - api_plan_func = _api_inst.gen_plan_dehydrate - elif _op == PKG_OP_DETACH: - api_plan_func = _api_inst.gen_plan_detach - elif _op == PKG_OP_EXACT_INSTALL: - api_plan_func = _api_inst.gen_plan_exact_install - elif _op == PKG_OP_FIX: - api_plan_func = _api_inst.gen_plan_fix - elif _op == PKG_OP_INSTALL: - api_plan_func = _api_inst.gen_plan_install - elif _op == PKG_OP_REHYDRATE: - api_plan_func = _api_inst.gen_plan_rehydrate - elif _op == PKG_OP_REVERT: - api_plan_func = _api_inst.gen_plan_revert - elif _op == PKG_OP_SYNC: - api_plan_func = _api_inst.gen_plan_sync - elif _op == PKG_OP_UNINSTALL: - api_plan_func = _api_inst.gen_plan_uninstall - elif _op == PKG_OP_UPDATE: - api_plan_func = _api_inst.gen_plan_update - elif _op == PKG_OP_VERIFY: - api_plan_func = _api_inst.gen_plan_verify - else: - raise RuntimeError("__api_plan() invalid op: {0}".format(_op)) + # if we didn't accept licenses (due to an exception) then do that now. + if not planned_self and _accept: + _accept_plan_licenses(_api_inst) - errors_json = [] - planned_self = False - child_plans = [] - try: - for pd in api_plan_func(**kwargs): - if planned_self: - # we don't display anything for child images - # since they currently do their own display - # work (unless parsable output is requested). - child_plans.append(pd) - continue - - # the first plan description is always for ourself. 
- planned_self = True - pkg_timer.record("planning", logger=logger) - - # if we're in parsable mode don't display anything - # until after we finish planning for all children - if _parsable_version is None and display_plan_cb: - display_plan_cb(_api_inst, [], _noexecute, - _omit_headers, _op, _parsable_version, - _quiet, _quiet_plan, _show_licenses, - _stage, _verbose, _unpackaged, - _unpackaged_only) - - # if requested accept licenses for child images. we - # have to do this before recursing into children. - if _accept: - _accept_plan_licenses(_api_inst) - except: - ret = __api_plan_exception(_op, _noexecute, _verbose, - _api_inst, errors_json=errors_json, - display_plan_cb=display_plan_cb) - if ret["status"] != EXIT_OK: - pkg_timer.record("planning", logger=logger) - return ret - - if not planned_self: - # if we got an exception we didn't do planning for children - pkg_timer.record("planning", logger=logger) - - elif _api_inst.isparent(_li_ignore): - # if we didn't get an exception and we're a parent image then - # we should have done planning for child images. - pkg_timer.record("planning children", logger=logger) - - # if we didn't display our own plan (due to an exception), or if we're - # in parsable mode, then display our plan now. - parsable_plan = None - if not planned_self or _parsable_version is not None: - try: - if display_plan_cb: - display_plan_cb(_api_inst, child_plans, - _noexecute, _omit_headers, _op, - _parsable_version, _quiet, _quiet_plan, - _show_licenses, _stage, _verbose, - _unpackaged, _unpackaged_only) - else: - plan = _api_inst.describe() - parsable_plan =plan.get_parsable_plan( - _parsable_version, child_plans, - api_inst=_api_inst) - # Convert to json. - parsable_plan = json.loads(json.dumps( - parsable_plan)) - except api_errors.ApiException as e: - _error_json(e, cmd=_op, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - - # if we didn't accept licenses (due to an exception) then do that now. 
- if not planned_self and _accept: - _accept_plan_licenses(_api_inst) + data = {} + if parsable_plan: + data["plan"] = parsable_plan - data = {} - if parsable_plan: - data["plan"] = parsable_plan + return __prepare_json(EXIT_OK, data=data) - return __prepare_json(EXIT_OK, data=data) def __api_plan_file(api_inst): - """Return the path to the PlanDescription save file.""" + """Return the path to the PlanDescription save file.""" + + plandir = api_inst.img_plandir + return os.path.join(plandir, "plandesc") - plandir = api_inst.img_plandir - return os.path.join(plandir, "plandesc") def __api_plan_save(api_inst, logger=None): - """Save an image plan to a file.""" + """Save an image plan to a file.""" + + # get a pointer to the plan + plan = api_inst.describe() + + # save the PlanDescription to a file + path = __api_plan_file(api_inst) + oflags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY + try: + fd = os.open(path, oflags, 0o644) + with os.fdopen(fd, "w") as fobj: + plan._save(fobj) + + # cleanup any old style imageplan save files + for f in os.listdir(api_inst.img_plandir): + path = os.path.join(api_inst.img_plandir, f) + if re.search(r"^actions\.[0-9]+\.json$", f): + os.unlink(path) + if re.search(r"^pkgs\.[0-9]+\.json$", f): + os.unlink(path) + except OSError as e: + raise api_errors._convert_error(e) - # get a pointer to the plan - plan = api_inst.describe() + pkg_timer.record("saving plan", logger=logger) - # save the PlanDescription to a file - path = __api_plan_file(api_inst) - oflags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY - try: - fd = os.open(path, oflags, 0o644) - with os.fdopen(fd, "w") as fobj: - plan._save(fobj) - - # cleanup any old style imageplan save files - for f in os.listdir(api_inst.img_plandir): - path = os.path.join(api_inst.img_plandir, f) - if re.search(r"^actions\.[0-9]+\.json$", f): - os.unlink(path) - if re.search(r"^pkgs\.[0-9]+\.json$", f): - os.unlink(path) - except OSError as e: - raise api_errors._convert_error(e) - - pkg_timer.record("saving plan", logger=logger) def __api_plan_load(api_inst, stage, origins, logger=None): - """Loan an image plan from a file.""" + """Loan an image plan from a file.""" - # load an existing plan - path = __api_plan_file(api_inst) - plan = api.PlanDescription() - try: - with open(path) as fobj: - plan._load(fobj) - except OSError as e: - raise api_errors._convert_error(e) + # load an existing plan + path = __api_plan_file(api_inst) + plan = api.PlanDescription() + try: + with open(path) as fobj: + plan._load(fobj) + except OSError as e: + raise api_errors._convert_error(e) - pkg_timer.record("loading plan", logger=logger) + pkg_timer.record("loading plan", logger=logger) - api_inst.reset() - api_inst.set_alt_repos(origins) - api_inst.load_plan(plan, prepared=(stage == API_STAGE_EXECUTE)) - pkg_timer.record("re-initializing plan", logger=logger) + api_inst.reset() + api_inst.set_alt_repos(origins) + api_inst.load_plan(plan, prepared=(stage == API_STAGE_EXECUTE)) + pkg_timer.record("re-initializing plan", logger=logger) - if stage == API_STAGE_EXECUTE: - __api_plan_delete(api_inst) + if stage == API_STAGE_EXECUTE: + __api_plan_delete(api_inst) -def __api_plan_delete(api_inst): - """Delete an image plan file.""" - path = __api_plan_file(api_inst) - try: - os.unlink(path) - except OSError as e: - raise api_errors._convert_error(e) - -def __verify_exit_status(api_inst): - """Determine verify exit status.""" - - plan = api_inst.describe() - for item_id, parent_id, msg_time, msg_level, msg_type, msg_text in \ - plan.gen_item_messages(): - if 
msg_level == MSG_ERROR: - return EXIT_OOPS - return EXIT_OK - -def __api_op(_op, _api_inst, _accept=False, _li_ignore=None, _noexecute=False, - _origins=None, _parsable_version=None, _quiet=False, _quiet_plan=False, - _review_release_notes=False, _show_licenses=False, - _stage=API_STAGE_DEFAULT, _verbose=0, - _unpackaged=False, _unpackaged_only=False, _verify_paths=EmptyI, - display_plan_cb=None, logger=None, - **kwargs): - """Do something that involves the api. - - Arguments prefixed with '_' are primarily used within this - function. All other arguments must be specified via keyword - assignment and will be passed directly on to the api - interfaces being invoked.""" - - data = {} - if _stage in [API_STAGE_DEFAULT, API_STAGE_PLAN]: - # create a new plan - ret = __api_plan(_op=_op, _api_inst=_api_inst, - _accept=_accept, _li_ignore=_li_ignore, - _noexecute=_noexecute, _origins=_origins, - _parsable_version=_parsable_version, _quiet=_quiet, - _review_release_notes=_review_release_notes, - _show_licenses=_show_licenses, _stage=_stage, - _verbose=_verbose, _quiet_plan=_quiet_plan, - _unpackaged=_unpackaged, _unpackaged_only=_unpackaged_only, - _verify_paths=_verify_paths, display_plan_cb=display_plan_cb, - logger=logger, **kwargs) - - if "_failures" in _api_inst._img.transport.repo_status: - ret.setdefault("data", {}).update( - {"repo_status": - _api_inst._img.transport.repo_status}) - - if ret["status"] != EXIT_OK: - return ret - if "data" in ret: - data.update(ret["data"]) - - if not _noexecute and _stage == API_STAGE_PLAN: - # We always save the plan, even if it is a noop. We - # do this because we want to be able to verify that we - # can load and execute a noop plan. (This mimics - # normal api behavior which doesn't prevent an api - # consumer from creating a noop plan and then - # preparing and executing it.) - __api_plan_save(_api_inst, logger=logger) - # for pkg verify or fix. - if _op in [PKG_OP_FIX, PKG_OP_VERIFY] and _noexecute and \ - _quiet_plan: - exit_code = __verify_exit_status(_api_inst) - return __prepare_json(exit_code, data=data) - if _api_inst.planned_nothingtodo(): - return __prepare_json(EXIT_NOP, data=data) - if _noexecute or _stage == API_STAGE_PLAN: - return __prepare_json(EXIT_OK, data=data) - else: - assert _stage in [API_STAGE_PREPARE, API_STAGE_EXECUTE] - __api_plan_load(_api_inst, _stage, _origins, logger=logger) - - # Exceptions which happen here are printed in the above level, - # with or without some extra decoration done here. 
- if _stage in [API_STAGE_DEFAULT, API_STAGE_PREPARE]: - ret = __api_prepare_plan(_op, _api_inst) - pkg_timer.record("preparing", logger=logger) - - if ret["status"] != EXIT_OK: - return ret - if _stage == API_STAGE_PREPARE: - return __prepare_json(EXIT_OK, data=data) - - ret = __api_execute_plan(_op, _api_inst) - pkg_timer.record("executing", logger=logger) - - if _review_release_notes and ret["status"] == EXIT_OK and \ - _stage == API_STAGE_DEFAULT and _api_inst.solaris_image(): - data["release_notes_url"] = misc.get_release_notes_url() - ret = __prepare_json(EXIT_OK, data=data) - elif ret["status"] == EXIT_OK and data: - ret = __prepare_json(EXIT_OK, data=data) - - return ret - -def _exact_install(op, api_inst, pargs, - accept, backup_be, backup_be_name, be_activate, be_name, li_ignore, - li_parent_sync, new_be, noexecute, origins, parsable_version, quiet, - refresh_catalogs, reject_pats, show_licenses, update_index, verbose, - display_plan_cb=None, logger=None): - errors_json = [] - if not pargs: - error = {"reason": _("at least one package name required")} - errors_json.append(error) - return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) - - rval, res = _get_fmri_args(api_inst, pargs, cmd=op, - errors_json=errors_json) - if not rval: - return __prepare_json(EXIT_OOPS, errors=errors_json) - - xrval, xres = _get_fmri_args(api_inst, reject_pats, cmd=op, - errors_json=errors_json) - if not xrval: - return __prepare_json(EXIT_OOPS, errors=errors_json) - - return __api_op(op, api_inst, _accept=accept, _li_ignore=li_ignore, - _noexecute=noexecute, _origins=origins, _quiet=quiet, - _show_licenses=show_licenses, _verbose=verbose, - backup_be=backup_be, backup_be_name=backup_be_name, - be_activate=be_activate, be_name=be_name, - li_parent_sync=li_parent_sync, new_be=new_be, - _parsable_version=parsable_version, pkgs_inst=pargs, - refresh_catalogs=refresh_catalogs, reject_list=reject_pats, - update_index=update_index, display_plan_cb=display_plan_cb, - logger=logger) - -def _install(op, api_inst, pargs, accept, act_timeout, backup_be, - backup_be_name, be_activate, be_name, li_ignore, li_erecurse, - li_parent_sync, new_be, noexecute, origins, parsable_version, quiet, - refresh_catalogs, reject_pats, show_licenses, stage, update_index, - verbose, display_plan_cb=None, logger=None): - """Attempt to take package specified to INSTALLED state. 
The operands - are interpreted as glob patterns.""" - - errors_json = [] - if not pargs: - error = {"reason": _("at least one package name required")} - errors_json.append(error) - return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) - - rval, res = _get_fmri_args(api_inst, pargs, cmd=op, - errors_json=errors_json) - if not rval: - return __prepare_json(EXIT_OOPS, errors=errors_json) - - xrval, xres = _get_fmri_args(api_inst, reject_pats, cmd=op, - errors_json=errors_json) - if not xrval: - return __prepare_json(EXIT_OOPS, errors=errors_json) - - return __api_op(op, api_inst, _accept=accept, _li_ignore=li_ignore, - _noexecute=noexecute, _origins=origins, - _parsable_version=parsable_version, _quiet=quiet, - _show_licenses=show_licenses, _stage=stage, _verbose=verbose, - act_timeout=act_timeout, backup_be=backup_be, - backup_be_name=backup_be_name, be_activate=be_activate, - be_name=be_name, li_erecurse=li_erecurse, - li_parent_sync=li_parent_sync, new_be=new_be, pkgs_inst=pargs, - refresh_catalogs=refresh_catalogs, reject_list=reject_pats, - update_index=update_index, display_plan_cb=display_plan_cb, - logger=logger) - -def _update(op, api_inst, pargs, accept, act_timeout, backup_be, backup_be_name, - be_activate, be_name, force, ignore_missing, li_ignore, li_erecurse, - li_parent_sync, new_be, noexecute, origins, parsable_version, quiet, - refresh_catalogs, reject_pats, show_licenses, stage, update_index, verbose, - display_plan_cb=None, logger=None): - """Attempt to take all installed packages specified to latest - version.""" - - errors_json = [] - rval, res = _get_fmri_args(api_inst, pargs, cmd=op, - errors_json=errors_json) - if not rval: - return __prepare_json(EXIT_OOPS, errors=errors_json) +def __api_plan_delete(api_inst): + """Delete an image plan file.""" - xrval, xres = _get_fmri_args(api_inst, reject_pats, cmd=op, - errors_json=errors_json) - if not xrval: - return __prepare_json(EXIT_OOPS, errors=errors_json) + path = __api_plan_file(api_inst) + try: + os.unlink(path) + except OSError as e: + raise api_errors._convert_error(e) - if res: - # If there are specific installed packages to update, - # then take only those packages to the latest version - # allowed by the patterns specified. (The versions - # specified can be older than what is installed.) - pkgs_update = pargs - review_release_notes = False - else: - # If no packages were specified, attempt to update all - # installed packages. 
- pkgs_update = None - review_release_notes = True - - return __api_op(op, api_inst, _accept=accept, _li_ignore=li_ignore, - _noexecute=noexecute, _origins=origins, - _parsable_version=parsable_version, _quiet=quiet, - _review_release_notes=review_release_notes, - _show_licenses=show_licenses, _stage=stage, _verbose=verbose, - act_timeout=act_timeout, backup_be=backup_be, - backup_be_name=backup_be_name, be_activate=be_activate, - be_name=be_name, force=force, ignore_missing=ignore_missing, - li_erecurse=li_erecurse, li_parent_sync=li_parent_sync, - new_be=new_be, pkgs_update=pkgs_update, - refresh_catalogs=refresh_catalogs, reject_list=reject_pats, - update_index=update_index, display_plan_cb=display_plan_cb, - logger=logger) - -def _uninstall(op, api_inst, pargs, - act_timeout, backup_be, backup_be_name, be_activate, be_name, - ignore_missing, li_ignore, li_erecurse, li_parent_sync, new_be, noexecute, - parsable_version, quiet, stage, update_index, verbose, - display_plan_cb=None, logger=None): - """Attempt to take package specified to DELETED state.""" - - errors_json = [] - if not pargs: - error = {"reason": _("at least one package name required")} - errors_json.append(error) - return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) - - rval, res = _get_fmri_args(api_inst, pargs, cmd=op, - errors_json=errors_json) - if not rval: - return __prepare_json(EXIT_OOPS, errors=errors_json) - return __api_op(op, api_inst, _li_ignore=li_ignore, - _noexecute=noexecute, _parsable_version=parsable_version, - _quiet=quiet, _stage=stage, _verbose=verbose, - act_timeout=act_timeout, backup_be=backup_be, - backup_be_name=backup_be_name, be_activate=be_activate, - be_name=be_name, ignore_missing=ignore_missing, - li_erecurse=li_erecurse, li_parent_sync=li_parent_sync, - new_be=new_be, pkgs_to_uninstall=pargs, update_index=update_index, - display_plan_cb=display_plan_cb, logger=logger) - -def _publisher_set(op, api_inst, pargs, ssl_key, ssl_cert, origin_uri, - reset_uuid, add_mirrors, remove_mirrors, add_origins, remove_origins, - enable_origins, disable_origins, refresh_allowed, disable, sticky, - search_before, search_after, search_first, approved_ca_certs, - revoked_ca_certs, unset_ca_certs, set_props, add_prop_values, - remove_prop_values, unset_props, repo_uri, proxy_uri, - verbose=None, li_erecurse=None): - """Function to set publisher.""" - - name = None - errors_json = [] - if len(pargs) == 0 and not repo_uri: - errors_json.append({"reason": _("requires a publisher name")}) - return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) - elif len(pargs) > 1: - errors_json.append({"reason": _("only one publisher name may " - "be specified")}) - return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) - elif pargs: - name = pargs[0] - - # Get sanitized SSL Cert/Key input values. - ssl_cert, ssl_key = _get_ssl_cert_key(api_inst.root, api_inst.is_zone, - ssl_cert, ssl_key) - - if not repo_uri: - # Normal case. 
- ret_json = _set_pub_error_wrap(_add_update_pub, name, [], - api_inst, name, disable=disable, sticky=sticky, - origin_uri=origin_uri, add_mirrors=add_mirrors, - remove_mirrors=remove_mirrors, add_origins=add_origins, - remove_origins=remove_origins, - enable_origins=enable_origins, - disable_origins=disable_origins, ssl_cert=ssl_cert, - ssl_key=ssl_key, search_before=search_before, - search_after=search_after, search_first=search_first, - reset_uuid=reset_uuid, refresh_allowed=refresh_allowed, - set_props=set_props, add_prop_values=add_prop_values, - remove_prop_values=remove_prop_values, - unset_props=unset_props, approved_cas=approved_ca_certs, - revoked_cas=revoked_ca_certs, unset_cas=unset_ca_certs, - proxy_uri=proxy_uri) - - if "errors" in ret_json: - for err in ret_json["errors"]: - errors_json.append(err) - return __prepare_json(ret_json["status"], errors=errors_json) - - # Automatic configuration via -p case. - def get_pubs(): - if proxy_uri: - proxies = [publisher.ProxyURI(proxy_uri)] - else: - proxies = [] - repo = publisher.RepositoryURI(repo_uri, - ssl_cert=ssl_cert, ssl_key=ssl_key, proxies=proxies) - return __prepare_json(EXIT_OK, data=api_inst.get_publisherdata( - repo=repo)) +def __verify_exit_status(api_inst): + """Determine verify exit status.""" + + plan = api_inst.describe() + for ( + item_id, + parent_id, + msg_time, + msg_level, + msg_type, + msg_text, + ) in plan.gen_item_messages(): + if msg_level == MSG_ERROR: + return EXIT_OOPS + return EXIT_OK + + +def __api_op( + _op, + _api_inst, + _accept=False, + _li_ignore=None, + _noexecute=False, + _origins=None, + _parsable_version=None, + _quiet=False, + _quiet_plan=False, + _review_release_notes=False, + _show_licenses=False, + _stage=API_STAGE_DEFAULT, + _verbose=0, + _unpackaged=False, + _unpackaged_only=False, + _verify_paths=EmptyI, + display_plan_cb=None, + logger=None, + **kwargs, +): + """Do something that involves the api. + + Arguments prefixed with '_' are primarily used within this + function. All other arguments must be specified via keyword + assignment and will be passed directly on to the api + interfaces being invoked.""" + + data = {} + if _stage in [API_STAGE_DEFAULT, API_STAGE_PLAN]: + # create a new plan + ret = __api_plan( + _op=_op, + _api_inst=_api_inst, + _accept=_accept, + _li_ignore=_li_ignore, + _noexecute=_noexecute, + _origins=_origins, + _parsable_version=_parsable_version, + _quiet=_quiet, + _review_release_notes=_review_release_notes, + _show_licenses=_show_licenses, + _stage=_stage, + _verbose=_verbose, + _quiet_plan=_quiet_plan, + _unpackaged=_unpackaged, + _unpackaged_only=_unpackaged_only, + _verify_paths=_verify_paths, + display_plan_cb=display_plan_cb, + logger=logger, + **kwargs, + ) + + if "_failures" in _api_inst._img.transport.repo_status: + ret.setdefault("data", {}).update( + {"repo_status": _api_inst._img.transport.repo_status} + ) + + if ret["status"] != EXIT_OK: + return ret + if "data" in ret: + data.update(ret["data"]) + + if not _noexecute and _stage == API_STAGE_PLAN: + # We always save the plan, even if it is a noop. We + # do this because we want to be able to verify that we + # can load and execute a noop plan. (This mimics + # normal api behavior which doesn't prevent an api + # consumer from creating a noop plan and then + # preparing and executing it.) + __api_plan_save(_api_inst, logger=logger) + # for pkg verify or fix. 
+ if _op in [PKG_OP_FIX, PKG_OP_VERIFY] and _noexecute and _quiet_plan: + exit_code = __verify_exit_status(_api_inst) + return __prepare_json(exit_code, data=data) + if _api_inst.planned_nothingtodo(): + return __prepare_json(EXIT_NOP, data=data) + if _noexecute or _stage == API_STAGE_PLAN: + return __prepare_json(EXIT_OK, data=data) + else: + assert _stage in [API_STAGE_PREPARE, API_STAGE_EXECUTE] + __api_plan_load(_api_inst, _stage, _origins, logger=logger) + + # Exceptions which happen here are printed in the above level, + # with or without some extra decoration done here. + if _stage in [API_STAGE_DEFAULT, API_STAGE_PREPARE]: + ret = __api_prepare_plan(_op, _api_inst) + pkg_timer.record("preparing", logger=logger) + + if ret["status"] != EXIT_OK: + return ret + if _stage == API_STAGE_PREPARE: + return __prepare_json(EXIT_OK, data=data) + + ret = __api_execute_plan(_op, _api_inst) + pkg_timer.record("executing", logger=logger) + + if ( + _review_release_notes + and ret["status"] == EXIT_OK + and _stage == API_STAGE_DEFAULT + and _api_inst.solaris_image() + ): + data["release_notes_url"] = misc.get_release_notes_url() + ret = __prepare_json(EXIT_OK, data=data) + elif ret["status"] == EXIT_OK and data: + ret = __prepare_json(EXIT_OK, data=data) + + return ret + + +def _exact_install( + op, + api_inst, + pargs, + accept, + backup_be, + backup_be_name, + be_activate, + be_name, + li_ignore, + li_parent_sync, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + update_index, + verbose, + display_plan_cb=None, + logger=None, +): + errors_json = [] + if not pargs: + error = {"reason": _("at least one package name required")} + errors_json.append(error) + return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) + + rval, res = _get_fmri_args(api_inst, pargs, cmd=op, errors_json=errors_json) + if not rval: + return __prepare_json(EXIT_OOPS, errors=errors_json) + + xrval, xres = _get_fmri_args( + api_inst, reject_pats, cmd=op, errors_json=errors_json + ) + if not xrval: + return __prepare_json(EXIT_OOPS, errors=errors_json) + + return __api_op( + op, + api_inst, + _accept=accept, + _li_ignore=li_ignore, + _noexecute=noexecute, + _origins=origins, + _quiet=quiet, + _show_licenses=show_licenses, + _verbose=verbose, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_activate=be_activate, + be_name=be_name, + li_parent_sync=li_parent_sync, + new_be=new_be, + _parsable_version=parsable_version, + pkgs_inst=pargs, + refresh_catalogs=refresh_catalogs, + reject_list=reject_pats, + update_index=update_index, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + +def _install( + op, + api_inst, + pargs, + accept, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + stage, + update_index, + verbose, + display_plan_cb=None, + logger=None, +): + """Attempt to take package specified to INSTALLED state. 
The operands + are interpreted as glob patterns.""" + + errors_json = [] + if not pargs: + error = {"reason": _("at least one package name required")} + errors_json.append(error) + return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) + + rval, res = _get_fmri_args(api_inst, pargs, cmd=op, errors_json=errors_json) + if not rval: + return __prepare_json(EXIT_OOPS, errors=errors_json) + + xrval, xres = _get_fmri_args( + api_inst, reject_pats, cmd=op, errors_json=errors_json + ) + if not xrval: + return __prepare_json(EXIT_OOPS, errors=errors_json) + + return __api_op( + op, + api_inst, + _accept=accept, + _li_ignore=li_ignore, + _noexecute=noexecute, + _origins=origins, + _parsable_version=parsable_version, + _quiet=quiet, + _show_licenses=show_licenses, + _stage=stage, + _verbose=verbose, + act_timeout=act_timeout, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_activate=be_activate, + be_name=be_name, + li_erecurse=li_erecurse, + li_parent_sync=li_parent_sync, + new_be=new_be, + pkgs_inst=pargs, + refresh_catalogs=refresh_catalogs, + reject_list=reject_pats, + update_index=update_index, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + +def _update( + op, + api_inst, + pargs, + accept, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + force, + ignore_missing, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + origins, + parsable_version, + quiet, + refresh_catalogs, + reject_pats, + show_licenses, + stage, + update_index, + verbose, + display_plan_cb=None, + logger=None, +): + """Attempt to take all installed packages specified to latest + version.""" + + errors_json = [] + rval, res = _get_fmri_args(api_inst, pargs, cmd=op, errors_json=errors_json) + if not rval: + return __prepare_json(EXIT_OOPS, errors=errors_json) + + xrval, xres = _get_fmri_args( + api_inst, reject_pats, cmd=op, errors_json=errors_json + ) + if not xrval: + return __prepare_json(EXIT_OOPS, errors=errors_json) + + if res: + # If there are specific installed packages to update, + # then take only those packages to the latest version + # allowed by the patterns specified. (The versions + # specified can be older than what is installed.) + pkgs_update = pargs + review_release_notes = False + else: + # If no packages were specified, attempt to update all + # installed packages. 
+ pkgs_update = None + review_release_notes = True + + return __api_op( + op, + api_inst, + _accept=accept, + _li_ignore=li_ignore, + _noexecute=noexecute, + _origins=origins, + _parsable_version=parsable_version, + _quiet=quiet, + _review_release_notes=review_release_notes, + _show_licenses=show_licenses, + _stage=stage, + _verbose=verbose, + act_timeout=act_timeout, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_activate=be_activate, + be_name=be_name, + force=force, + ignore_missing=ignore_missing, + li_erecurse=li_erecurse, + li_parent_sync=li_parent_sync, + new_be=new_be, + pkgs_update=pkgs_update, + refresh_catalogs=refresh_catalogs, + reject_list=reject_pats, + update_index=update_index, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + +def _uninstall( + op, + api_inst, + pargs, + act_timeout, + backup_be, + backup_be_name, + be_activate, + be_name, + ignore_missing, + li_ignore, + li_erecurse, + li_parent_sync, + new_be, + noexecute, + parsable_version, + quiet, + stage, + update_index, + verbose, + display_plan_cb=None, + logger=None, +): + """Attempt to take package specified to DELETED state.""" + + errors_json = [] + if not pargs: + error = {"reason": _("at least one package name required")} + errors_json.append(error) + return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) + + rval, res = _get_fmri_args(api_inst, pargs, cmd=op, errors_json=errors_json) + if not rval: + return __prepare_json(EXIT_OOPS, errors=errors_json) + + return __api_op( + op, + api_inst, + _li_ignore=li_ignore, + _noexecute=noexecute, + _parsable_version=parsable_version, + _quiet=quiet, + _stage=stage, + _verbose=verbose, + act_timeout=act_timeout, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_activate=be_activate, + be_name=be_name, + ignore_missing=ignore_missing, + li_erecurse=li_erecurse, + li_parent_sync=li_parent_sync, + new_be=new_be, + pkgs_to_uninstall=pargs, + update_index=update_index, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + +def _publisher_set( + op, + api_inst, + pargs, + ssl_key, + ssl_cert, + origin_uri, + reset_uuid, + add_mirrors, + remove_mirrors, + add_origins, + remove_origins, + enable_origins, + disable_origins, + refresh_allowed, + disable, + sticky, + search_before, + search_after, + search_first, + approved_ca_certs, + revoked_ca_certs, + unset_ca_certs, + set_props, + add_prop_values, + remove_prop_values, + unset_props, + repo_uri, + proxy_uri, + verbose=None, + li_erecurse=None, +): + """Function to set publisher.""" + + name = None + errors_json = [] + if len(pargs) == 0 and not repo_uri: + errors_json.append({"reason": _("requires a publisher name")}) + return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) + elif len(pargs) > 1: + errors_json.append( + {"reason": _("only one publisher name may " "be specified")} + ) + return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) + elif pargs: + name = pargs[0] + + # Get sanitized SSL Cert/Key input values. + ssl_cert, ssl_key = _get_ssl_cert_key( + api_inst.root, api_inst.is_zone, ssl_cert, ssl_key + ) + + if not repo_uri: + # Normal case. 
+ ret_json = _set_pub_error_wrap( + _add_update_pub, + name, + [], + api_inst, + name, + disable=disable, + sticky=sticky, + origin_uri=origin_uri, + add_mirrors=add_mirrors, + remove_mirrors=remove_mirrors, + add_origins=add_origins, + remove_origins=remove_origins, + enable_origins=enable_origins, + disable_origins=disable_origins, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + search_before=search_before, + search_after=search_after, + search_first=search_first, + reset_uuid=reset_uuid, + refresh_allowed=refresh_allowed, + set_props=set_props, + add_prop_values=add_prop_values, + remove_prop_values=remove_prop_values, + unset_props=unset_props, + approved_cas=approved_ca_certs, + revoked_cas=revoked_ca_certs, + unset_cas=unset_ca_certs, + proxy_uri=proxy_uri, + ) + + if "errors" in ret_json: + for err in ret_json["errors"]: + errors_json.append(err) + return __prepare_json(ret_json["status"], errors=errors_json) - ret_json = None - try: - ret_json = _set_pub_error_wrap(get_pubs, name, - [api_errors.UnsupportedRepositoryOperation]) - except api_errors.UnsupportedRepositoryOperation as e: - # Fail if the operation can't be done automatically. - _error_json(str(e), cmd=op, errors_json=errors_json, - errorType="unsupported_repo_op") - return __prepare_json(EXIT_OOPS, errors=errors_json) + # Automatic configuration via -p case. + def get_pubs(): + if proxy_uri: + proxies = [publisher.ProxyURI(proxy_uri)] else: - if ret_json["status"] != EXIT_OK and "errors" in ret_json: - for err in ret_json["errors"]: - _error_json(err["reason"], cmd=op, - errors_json=errors_json) - return __prepare_json(ret_json["status"], - errors=errors_json) - # For the automatic publisher configuration case, update or add - # publishers based on whether they exist and if they match any - # specified publisher prefix. - if "data" not in ret_json: - _error_json(_(""" + proxies = [] + repo = publisher.RepositoryURI( + repo_uri, ssl_cert=ssl_cert, ssl_key=ssl_key, proxies=proxies + ) + return __prepare_json( + EXIT_OK, data=api_inst.get_publisherdata(repo=repo) + ) + + ret_json = None + try: + ret_json = _set_pub_error_wrap( + get_pubs, name, [api_errors.UnsupportedRepositoryOperation] + ) + except api_errors.UnsupportedRepositoryOperation as e: + # Fail if the operation can't be done automatically. + _error_json( + str(e), + cmd=op, + errors_json=errors_json, + errorType="unsupported_repo_op", + ) + return __prepare_json(EXIT_OOPS, errors=errors_json) + else: + if ret_json["status"] != EXIT_OK and "errors" in ret_json: + for err in ret_json["errors"]: + _error_json(err["reason"], cmd=op, errors_json=errors_json) + return __prepare_json(ret_json["status"], errors=errors_json) + # For the automatic publisher configuration case, update or add + # publishers based on whether they exist and if they match any + # specified publisher prefix. + if "data" not in ret_json: + _error_json( + _( + """ The specified repository did not contain any publisher configuration information. This is likely the result of a repository configuration error. 
Please contact the repository administrator for further -assistance."""), errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - - pubs = ret_json["data"] - if name and name not in pubs: - known = [p.prefix for p in pubs] - unknown = [name] - e = api_errors.UnknownRepositoryPublishers(known=known, - unknown=unknown, location=repo_uri) - _error_json(str(e), errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) +assistance.""" + ), + errors_json=errors_json, + ) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + pubs = ret_json["data"] + if name and name not in pubs: + known = [p.prefix for p in pubs] + unknown = [name] + e = api_errors.UnknownRepositoryPublishers( + known=known, unknown=unknown, location=repo_uri + ) + _error_json(str(e), errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + added = [] + updated = [] + failed = [] + + for src_pub in sorted(pubs): + prefix = src_pub.prefix + if name and prefix != name: + # User didn't request this one. + continue + + src_repo = src_pub.repository + if not api_inst.has_publisher(prefix=prefix): + add_origins = [] + if not src_repo or not src_repo.origins: + # If the repository publisher configuration + # didn't include configuration information + # for the publisher's repositories, assume + # that the origin for the new publisher + # matches the URI provided. + add_origins.append(repo_uri) + + # Any -p origins/mirrors returned from get_pubs() should + # use the proxy we declared, if any. + if proxy_uri and src_repo: + proxies = [publisher.ProxyURI(proxy_uri)] + for repo_uri in src_repo.origins: + repo_uri.proxies = proxies + for repo_uri in src_repo.mirrors: + repo_uri.proxies = proxies + + ret_json = _set_pub_error_wrap( + _add_update_pub, + name, + [], + api_inst, + prefix, + pub=src_pub, + add_origins=add_origins, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + sticky=sticky, + search_after=search_after, + search_before=search_before, + search_first=search_first, + set_props=set_props, + add_prop_values=add_prop_values, + remove_prop_values=remove_prop_values, + unset_props=unset_props, + proxy_uri=proxy_uri, + ) + if ret_json["status"] == EXIT_OK: + added.append(prefix) + + # When multiple publishers result from a single -p + # operation, this ensures that the new publishers are + # ordered correctly. + search_first = False + search_after = prefix + search_before = None + else: + add_origins = [] + add_mirrors = [] + dest_pub = api_inst.get_publisher(prefix=prefix, duplicate=True) + dest_repo = dest_pub.repository + if dest_repo.origins and not dest_repo.has_origin(repo_uri): + add_origins = [repo_uri] + + if not src_repo and not add_origins: + # The repository doesn't have to provide origin + # information for publishers. If it doesn't, + # the origin of every publisher returned is + # assumed to match the URI that the user + # provided. Since this is an update case, + # nothing special needs to be done. + if not dest_repo.origins: + add_origins = [repo_uri] + elif src_repo: + # Avoid duplicates by adding only those mirrors + # or origins not already known. + add_mirrors = [ + u.uri + for u in src_repo.mirrors + if u.uri not in dest_repo.mirrors + ] + add_origins = [ + u.uri + for u in src_repo.origins + if u.uri not in dest_repo.origins + ] + + # Special bits to update; for these, take the + # new value as-is (don't attempt to merge). 
+ for prop in ( + "collection_type", + "description", + "legal_uris", + "name", + "refresh_seconds", + "registration_uri", + "related_uris", + ): + src_val = getattr(src_repo, prop) + if src_val is not None: + setattr(dest_repo, prop, src_val) + + # If an alias doesn't already exist, update it too. + if src_pub.alias and not dest_pub.alias: + dest_pub.alias = src_pub.alias + + ret_json = _set_pub_error_wrap( + _add_update_pub, + name, + [], + api_inst, + prefix, + pub=dest_pub, + add_mirrors=add_mirrors, + add_origins=add_origins, + set_props=set_props, + add_prop_values=add_prop_values, + remove_prop_values=remove_prop_values, + unset_props=unset_props, + proxy_uri=proxy_uri, + ) + + if ret_json["status"] == EXIT_OK: + updated.append(prefix) + + if ret_json["status"] != EXIT_OK: + for err in ret_json["errors"]: + failed.append((prefix, err["reason"])) + continue + + first = True + for pub, rmsg in failed: + if first: + first = False + _error_json( + "failed to add or update one or more " "publishers", + cmd=op, + errors_json=errors_json, + errorType="publisher_set", + ) + errors_json.append( + { + "reason": " {0}:\n{1}".format(pub, rmsg), + "errtype": "publisher_set", + } + ) + + data = {} + if added or updated: + if first: + data["header"] = "pkg set-publisher:" + if added: + data["added"] = added + if updated: + data["updated"] = updated + + if failed: + if len(failed) != len(pubs): + # Not all publishers retrieved could be added or + # updated. + return __prepare_json(EXIT_PARTIAL, data=data, errors=errors_json) + return __prepare_json(EXIT_OOPS, data=data, errors=errors_json) + + # Now that the configuration was successful, attempt to refresh the + # catalog data for all of the configured publishers. If the refresh + # had been allowed earlier while configuring each publisher, then this + # wouldn't be necessary and some possibly invalid configuration could + # have been eliminated sooner. However, that would be much slower as + # each refresh requires a client image state rebuild. + ret_json = __refresh(api_inst, added + updated) + ret_json["data"] = data + return ret_json - added = [] - updated = [] - failed = [] - - for src_pub in sorted(pubs): - prefix = src_pub.prefix - if name and prefix != name: - # User didn't request this one. - continue - - src_repo = src_pub.repository - if not api_inst.has_publisher(prefix=prefix): - add_origins = [] - if not src_repo or not src_repo.origins: - # If the repository publisher configuration - # didn't include configuration information - # for the publisher's repositories, assume - # that the origin for the new publisher - # matches the URI provided. - add_origins.append(repo_uri) - - # Any -p origins/mirrors returned from get_pubs() should - # use the proxy we declared, if any. 
- if proxy_uri and src_repo: - proxies = [publisher.ProxyURI(proxy_uri)] - for repo_uri in src_repo.origins: - repo_uri.proxies = proxies - for repo_uri in src_repo.mirrors: - repo_uri.proxies = proxies - - ret_json = _set_pub_error_wrap(_add_update_pub, name, - [], api_inst, prefix, pub=src_pub, - add_origins=add_origins, ssl_cert=ssl_cert, - ssl_key=ssl_key, sticky=sticky, - search_after=search_after, - search_before=search_before, - search_first=search_first, - set_props=set_props, - add_prop_values=add_prop_values, - remove_prop_values=remove_prop_values, - unset_props=unset_props, proxy_uri=proxy_uri) - if ret_json["status"] == EXIT_OK: - added.append(prefix) - - # When multiple publishers result from a single -p - # operation, this ensures that the new publishers are - # ordered correctly. - search_first = False - search_after = prefix - search_before = None - else: - add_origins = [] - add_mirrors = [] - dest_pub = api_inst.get_publisher(prefix=prefix, - duplicate=True) - dest_repo = dest_pub.repository - if dest_repo.origins and \ - not dest_repo.has_origin(repo_uri): - add_origins = [repo_uri] - - if not src_repo and not add_origins: - # The repository doesn't have to provide origin - # information for publishers. If it doesn't, - # the origin of every publisher returned is - # assumed to match the URI that the user - # provided. Since this is an update case, - # nothing special needs to be done. - if not dest_repo.origins: - add_origins = [repo_uri] - elif src_repo: - # Avoid duplicates by adding only those mirrors - # or origins not already known. - add_mirrors = [ - u.uri - for u in src_repo.mirrors - if u.uri not in dest_repo.mirrors - ] - add_origins = [ - u.uri - for u in src_repo.origins - if u.uri not in dest_repo.origins - ] - - # Special bits to update; for these, take the - # new value as-is (don't attempt to merge). - for prop in ("collection_type", "description", - "legal_uris", "name", "refresh_seconds", - "registration_uri", "related_uris"): - src_val = getattr(src_repo, prop) - if src_val is not None: - setattr(dest_repo, prop, - src_val) - - # If an alias doesn't already exist, update it too. - if src_pub.alias and not dest_pub.alias: - dest_pub.alias = src_pub.alias - - ret_json = _set_pub_error_wrap(_add_update_pub, name, - [], api_inst, prefix, pub=dest_pub, - add_mirrors=add_mirrors, add_origins=add_origins, - set_props=set_props, - add_prop_values=add_prop_values, - remove_prop_values=remove_prop_values, - unset_props=unset_props, proxy_uri=proxy_uri) - - if ret_json["status"] == EXIT_OK: - updated.append(prefix) - - if ret_json["status"] != EXIT_OK: - for err in ret_json["errors"]: - failed.append((prefix, err["reason"])) - continue - - first = True - for pub, rmsg in failed: - if first: - first = False - _error_json("failed to add or update one or more " - "publishers", cmd=op, errors_json=errors_json, - errorType="publisher_set") - errors_json.append({"reason": " {0}:\n{1}".format(pub, rmsg), - "errtype": "publisher_set"}) - - data = {} - if added or updated: - if first: - data["header"] = "pkg set-publisher:" - if added: - data["added"] = added - if updated: - data["updated"] = updated - - if failed: - if len(failed) != len(pubs): - # Not all publishers retrieved could be added or - # updated. - return __prepare_json(EXIT_PARTIAL, data=data, - errors=errors_json) - return __prepare_json(EXIT_OOPS, data=data, errors=errors_json) - - # Now that the configuration was successful, attempt to refresh the - # catalog data for all of the configured publishers. 
If the refresh - # had been allowed earlier while configuring each publisher, then this - # wouldn't be necessary and some possibly invalid configuration could - # have been eliminated sooner. However, that would be much slower as - # each refresh requires a client image state rebuild. - ret_json = __refresh(api_inst, added + updated) - ret_json["data"] = data - return ret_json def _publisher_unset(op, api_inst, pargs): - """Function to unset publishers.""" - - errors_json = [] - if not pargs: - errors_json.append({"reason": _("at least one publisher must " - "be specified")}) - return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) - - errors = [] - goal = len(pargs) - progtrack = api_inst.progresstracker - progtrack.job_start(progtrack.JOB_PKG_CACHE, goal=goal) - for name in pargs: - try: - api_inst.remove_publisher(prefix=name, alias=name) - except api_errors.ImageFormatUpdateNeeded as e: - _format_update_error(e, errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except (api_errors.PermissionsException, - api_errors.PublisherError, - api_errors.ModifyingSyspubException) as e: - errors.append((name, e)) - finally: - progtrack.job_add_progress(progtrack.JOB_PKG_CACHE) - - progtrack.job_done(progtrack.JOB_PKG_CACHE) - retcode = EXIT_OK - errors_json = [] - if errors: - if len(errors) == len(pargs): - # If the operation failed for every provided publisher - # prefix or alias, complete failure occurred. - retcode = EXIT_OOPS - else: - # If the operation failed for only some of the provided - # publisher prefixes or aliases, then partial failure - # occurred. - retcode = EXIT_PARTIAL - - txt = "" - for name, err in errors: - txt += "\n" - txt += _("Removal failed for '{pub}': {msg}").format( - pub=name, msg=err) - txt += "\n" - _error_json(txt, cmd=op, errors_json=errors_json) - - return __prepare_json(retcode, errors=errors_json) - -def _publisher_list(op, api_inst, pargs, omit_headers, preferred_only, - inc_disabled, output_format): - """pkg publishers. 
Note: publisher_a is a left-over parameter.""" - - errors_json = [] - field_data = { - "publisher" : [("default", "tsv"), _("PUBLISHER"), ""], - "attrs" : [("default"), "", ""], - "type" : [("default", "tsv"), _("TYPE"), ""], - "status" : [("default", "tsv"), _("STATUS"), ""], - "repo_loc" : [("default"), _("LOCATION"), ""], - "uri": [("tsv"), _("URI"), ""], - "sticky" : [("tsv"), _("STICKY"), ""], - "enabled" : [("tsv"), _("ENABLED"), ""], - "syspub" : [("tsv"), _("SYSPUB"), ""], - "proxy" : [("tsv"), _("PROXY"), ""], - "proxied" : [("default"), _("P"), ""] - } - - desired_field_order = (_("PUBLISHER"), "", _("STICKY"), - _("SYSPUB"), _("ENABLED"), _("TYPE"), - _("STATUS"), _("P"), _("LOCATION")) - - # Custom key function for preserving field ordering - def key_fields(item): - return desired_field_order.index(get_header(item)) - - # Functions for manipulating field_data records - def filter_default(record): - return "default" in record[0] - - def filter_tsv(record): - return "tsv" in record[0] - - def get_header(record): - return record[1] - - def get_value(record): - return record[2] - - def set_value(record, value): - record[2] = value - - api_inst.progresstracker.set_purpose( - api_inst.progresstracker.PURPOSE_LISTING) - - cert_cache = {} - def get_cert_info(ssl_cert): - if not ssl_cert: - return None - if ssl_cert not in cert_cache: - c = cert_cache[ssl_cert] = {} - errors = c["errors"] = [] - times = c["info"] = { - "effective": "", - "expiration": "", - } - - try: - cert = misc.validate_ssl_cert(ssl_cert) - except (EnvironmentError, - api_errors.CertificateError, - api_errors.PermissionsException) as e: - # If the cert information can't be retrieved, - # add the errors to a list and continue on. - errors.append(e) - c["valid"] = False - else: - nb = cert.get_notBefore() - # strptime's first argument must be str - t = time.strptime(misc.force_str(nb), - "%Y%m%d%H%M%SZ") - nb = datetime.datetime.utcfromtimestamp( - calendar.timegm(t)) - times["effective"] = nb.strftime("%c") - - na = cert.get_notAfter() - t = time.strptime(misc.force_str(na), - "%Y%m%d%H%M%SZ") - na = datetime.datetime.utcfromtimestamp( - calendar.timegm(t)) - times["expiration"] = na.strftime("%c") - c["valid"] = True - - return cert_cache[ssl_cert] - - retcode = EXIT_OK - data = {} - if len(pargs) == 0: - if preferred_only: - pref_pub = api_inst.get_highest_ranked_publisher() - if api_inst.has_publisher(pref_pub): - pubs = [pref_pub] - else: - # Only publisher known is from an installed - # package and is not configured in the image. - pubs = [] - else: - pubs = [ - p for p in api_inst.get_publishers() - if inc_disabled or not p.disabled - ] - # Create a formatting string for the default output - # format - if output_format == "default": - filter_func = filter_default - - # Create a formatting string for the tsv output - # format - if output_format == "tsv": - filter_func = filter_tsv - desired_field_order = (_("PUBLISHER"), "", _("STICKY"), - _("SYSPUB"), _("ENABLED"), _("TYPE"), - _("STATUS"), _("URI"), _("PROXY")) - - # Extract our list of headers from the field_data - # dictionary Make sure they are extracted in the - # desired order by using our custom key function. 
- hdrs = list(map(get_header, sorted(filter(filter_func, - list(field_data.values())), key=key_fields))) - - if not omit_headers: - data["headers"] = hdrs - data["publishers"] = [] - for p in pubs: - # Store all our publisher related data in - # field_data ready for output - - set_value(field_data["publisher"], p.prefix) - # Setup the synthetic attrs field if the - # format is default. - if output_format == "default": - pstatus = "" - - if not p.sticky: - pstatus_list = [_("non-sticky")] - else: - pstatus_list = [] - - if p.disabled: - pstatus_list.append(_("disabled")) - if p.sys_pub: - pstatus_list.append(_("syspub")) - if pstatus_list: - pstatus = "({0})".format( - ", ".join(pstatus_list)) - set_value(field_data["attrs"], pstatus) - - if p.sticky: - set_value(field_data["sticky"], _("true")) - else: - set_value(field_data["sticky"], _("false")) - if not p.disabled: - set_value(field_data["enabled"], _("true")) - else: - set_value(field_data["enabled"], _("false")) - if p.sys_pub: - set_value(field_data["syspub"], _("true")) - else: - set_value(field_data["syspub"], _("false")) - - # Only show the selected repository's information in - # summary view. - if p.repository: - origins = p.repository.origins - mirrors = p.repository.mirrors - else: - origins = mirrors = [] - - set_value(field_data["repo_loc"], "") - set_value(field_data["proxied"], "") - # Update field_data for each origin and output - # a publisher record in our desired format. - for uri in sorted(origins): - # XXX get the real origin status - set_value(field_data["type"], _("origin")) - set_value(field_data["proxy"], "-") - set_value(field_data["proxied"], "F") - - set_value(field_data["uri"], uri) - if uri.disabled: - set_value(field_data["enabled"], - _("false")) - set_value(field_data["status"], - _("disabled")) - else: - set_value(field_data["enabled"], - _("true")) - set_value(field_data["status"], - _("online")) - - if uri.proxies: - set_value(field_data["proxied"], _("T")) - set_value(field_data["proxy"], - ", ".join( - [proxy.uri - for proxy in uri.proxies])) - if uri.system: - set_value(field_data["repo_loc"], - SYSREPO_HIDDEN_URI) - else: - set_value(field_data["repo_loc"], uri) - - values = map(get_value, - sorted(filter(filter_func, - field_data.values()), key=key_fields) - ) - entry = [] - for e in values: - if isinstance(e, six.string_types): - entry.append(e) - else: - entry.append(str(e)) - data["publishers"].append(entry) - # Update field_data for each mirror and output - # a publisher record in our desired format. - for uri in mirrors: - # XXX get the real mirror status - set_value(field_data["type"], _("mirror")) - # We do not currently deal with mirrors. So - # they are always online. 
- set_value(field_data["status"], _("online")) - set_value(field_data["proxy"], "-") - set_value(field_data["proxied"], _("F")) - - set_value(field_data["uri"], uri) - - if uri.proxies: - set_value(field_data["proxied"], - _("T")) - set_value(field_data["proxy"], - ", ".join( - [p.uri for p in uri.proxies])) - if uri.system: - set_value(field_data["repo_loc"], - SYSREPO_HIDDEN_URI) - else: - set_value(field_data["repo_loc"], uri) - - values = map(get_value, - sorted(filter(filter_func, - field_data.values()), key=key_fields) - ) - entry = [] - for e in values: - if isinstance(e, six.string_types): - entry.append(e) - else: - entry.append(str(e)) - data["publishers"].append(entry) - - if not origins and not mirrors: - set_value(field_data["type"], "") - set_value(field_data["status"], "") - set_value(field_data["uri"], "") - set_value(field_data["proxy"], "") - values = map(get_value, - sorted(filter(filter_func, - field_data.values()), key=key_fields) - ) - entry = [] - for e in values: - if isinstance(e, six.string_types): - entry.append(e) - else: - entry.append(str(e)) - data["publishers"].append(entry) - else: - def collect_ssl_info(uri, uri_data): - retcode = EXIT_OK - c = get_cert_info(uri.ssl_cert) - uri_data["SSL Key"] = str(uri.ssl_key) - uri_data["SSL Cert"] = str(uri.ssl_cert) - - if not c: - return retcode - - if c["errors"]: - retcode = EXIT_OOPS - - for e in c["errors"]: - errors_json.append({"reason": - "\n" + str(e) + "\n", "errtype": "cert_info"}) - - if c["valid"]: - uri_data["Cert. Effective Date"] = \ - str(c["info"]["effective"]) - uri_data["Cert. Expiration Date"] = \ - str(c["info"]["expiration"]) - return retcode - - def collect_repository(r, pub_data): - retcode = 0 - origins_data = [] - for uri in r.origins: - origin_data = {"Origin URI": str(uri)} - if uri.disabled: - origin_data["Status"] = _("Disabled") - else: - origin_data["Status"] = _("Online") - if uri.proxies: - origin_data["Proxy"] = \ - [str(p.uri) for p in uri.proxies] - rval = collect_ssl_info(uri, origin_data) - if rval == 1: - retcode = EXIT_PARTIAL - origins_data.append(origin_data) - - mirrors_data = [] - for uri in r.mirrors: - mirror_data = {"Mirror URI": str(uri)} - mirror_data["Status"] = _("Online") - if uri.proxies: - mirror_data["Proxy"] = \ - [str(p.uri) for p in uri.proxies] - rval = collect_ssl_info(uri, mirror_data) - if rval == 1: - retcode = EXIT_PARTIAL - mirrors_data.append(mirror_data) - if origins_data: - pub_data["origins"] = origins_data - if mirrors_data: - pub_data["mirrors"] = mirrors_data - return retcode - - def collect_signing_certs(p, pub_data): - if p.approved_ca_certs: - pub_data["Approved CAs"] = [str(cert) for - cert in p.approved_ca_certs] - if p.revoked_ca_certs: - pub_data["Revoked CAs"] = [str(cert) for - cert in p.revoked_ca_certs] - - for name in pargs: - # detailed print - pub = api_inst.get_publisher(prefix=name, alias=name) - dt = api_inst.get_publisher_last_update_time(pub.prefix) - if dt: - dt = dt.strftime("%c") - - pub_data = {} - pub_data["Publisher"] = pub.prefix - pub_data["Alias"] = pub.alias - - rval = collect_repository(pub.repository, pub_data) - if rval != 0: - # There was an error in displaying some - # of the information about a repository. - # However, continue on. 
- retcode = rval - - pub_data["Client UUID"] = pub.client_uuid - pub_data["Catalog Updated"] = dt - collect_signing_certs(pub, pub_data) - if pub.disabled: - pub_data["enabled"] = "No" - else: - pub_data["enabled"] = "Yes" - if pub.sticky: - pub_data["sticky"] = "Yes" - else: - pub_data["sticky"] = "No" - if pub.sys_pub: - pub_data["sys_pub"] = "Yes" - else: - pub_data["sys_pub"] = "No" - if pub.properties: - pub_data["Properties"] = {} - for k, v in six.iteritems(pub.properties): - pub_data["Properties"][k] = v - data.setdefault("publisher_details", []).append( - pub_data) - return __prepare_json(retcode, data=data, errors=errors_json, op=op) - -def _info(op, api_inst, pargs, display_license, info_local, info_remote, - origins, quiet): - """Display information about a package or packages. - """ - - errors_json = [] - data = {} - if info_remote and not pargs: - error = {"reason": _("must request remote info for specific " - "packages")} - errors_json.append(error) - return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) - - err = EXIT_OK - # Reset the progress tracker here, because we may have to switch to a - # different tracker due to the options parse. - api_inst.progresstracker = _get_tracker() - - api_inst.progresstracker.set_purpose( - api_inst.progresstracker.PURPOSE_LISTING) - - info_needed = api.PackageInfo.ALL_OPTIONS - if not display_license: - info_needed = api.PackageInfo.ALL_OPTIONS - \ - frozenset([api.PackageInfo.LICENSES]) - info_needed -= api.PackageInfo.ACTION_OPTIONS - info_needed |= frozenset([api.PackageInfo.DEPENDENCIES]) - + """Function to unset publishers.""" + + errors_json = [] + if not pargs: + errors_json.append( + {"reason": _("at least one publisher must " "be specified")} + ) + return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) + + errors = [] + goal = len(pargs) + progtrack = api_inst.progresstracker + progtrack.job_start(progtrack.JOB_PKG_CACHE, goal=goal) + for name in pargs: try: - ret = api_inst.info(pargs, info_local, info_needed, - ranked=info_remote, repos=origins) + api_inst.remove_publisher(prefix=name, alias=name) except api_errors.ImageFormatUpdateNeeded as e: - _format_update_error(e, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.NoPackagesInstalledException: - _error_json(_("no packages installed"), errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.ApiException as e: - _error_json(e, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - - pis = ret[api.ImageInterface.INFO_FOUND] - notfound = ret[api.ImageInterface.INFO_MISSING] - illegals = ret[api.ImageInterface.INFO_ILLEGALS] - - if illegals: - # No other results will be returned if illegal patterns were - # specified. - for i in illegals: - errors_json.append({"reason": str(i)}) - return __prepare_json(EXIT_OOPS, errors=errors_json) + _format_update_error(e, errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except ( + api_errors.PermissionsException, + api_errors.PublisherError, + api_errors.ModifyingSyspubException, + ) as e: + errors.append((name, e)) + finally: + progtrack.job_add_progress(progtrack.JOB_PKG_CACHE) + + progtrack.job_done(progtrack.JOB_PKG_CACHE) + retcode = EXIT_OK + errors_json = [] + if errors: + if len(errors) == len(pargs): + # If the operation failed for every provided publisher + # prefix or alias, complete failure occurred. 
+ retcode = EXIT_OOPS + else: + # If the operation failed for only some of the provided + # publisher prefixes or aliases, then partial failure + # occurred. + retcode = EXIT_PARTIAL + + txt = "" + for name, err in errors: + txt += "\n" + txt += _("Removal failed for '{pub}': {msg}").format( + pub=name, msg=err + ) + txt += "\n" + _error_json(txt, cmd=op, errors_json=errors_json) + + return __prepare_json(retcode, errors=errors_json) + + +def _publisher_list( + op, + api_inst, + pargs, + omit_headers, + preferred_only, + inc_disabled, + output_format, +): + """pkg publishers. Note: publisher_a is a left-over parameter.""" + + errors_json = [] + field_data = { + "publisher": [("default", "tsv"), _("PUBLISHER"), ""], + "attrs": [("default"), "", ""], + "type": [("default", "tsv"), _("TYPE"), ""], + "status": [("default", "tsv"), _("STATUS"), ""], + "repo_loc": [("default"), _("LOCATION"), ""], + "uri": [("tsv"), _("URI"), ""], + "sticky": [("tsv"), _("STICKY"), ""], + "enabled": [("tsv"), _("ENABLED"), ""], + "syspub": [("tsv"), _("SYSPUB"), ""], + "proxy": [("tsv"), _("PROXY"), ""], + "proxied": [("default"), _("P"), ""], + } + + desired_field_order = ( + _("PUBLISHER"), + "", + _("STICKY"), + _("SYSPUB"), + _("ENABLED"), + _("TYPE"), + _("STATUS"), + _("P"), + _("LOCATION"), + ) + + # Custom key function for preserving field ordering + def key_fields(item): + return desired_field_order.index(get_header(item)) + + # Functions for manipulating field_data records + def filter_default(record): + return "default" in record[0] + + def filter_tsv(record): + return "tsv" in record[0] + + def get_header(record): + return record[1] + + def get_value(record): + return record[2] + + def set_value(record, value): + record[2] = value + + api_inst.progresstracker.set_purpose( + api_inst.progresstracker.PURPOSE_LISTING + ) + + cert_cache = {} + + def get_cert_info(ssl_cert): + if not ssl_cert: + return None + if ssl_cert not in cert_cache: + c = cert_cache[ssl_cert] = {} + errors = c["errors"] = [] + times = c["info"] = { + "effective": "", + "expiration": "", + } - no_licenses = [] - for i, pi in enumerate(pis): - if display_license: - if not pi.licenses: - no_licenses.append(pi.fmri) - elif not quiet: - lics = [] - for lic in pi.licenses: - lics.append(str(lic)) - data.setdefault("licenses", []).append( - [pi.pkg_stem, lics]) - continue - - if quiet: - continue - - state = "" - if api.PackageInfo.INSTALLED in pi.states: - state = _("Installed") - elif api.PackageInfo.UNSUPPORTED in pi.states: - state = _("Unsupported") + try: + cert = misc.validate_ssl_cert(ssl_cert) + except ( + EnvironmentError, + api_errors.CertificateError, + api_errors.PermissionsException, + ) as e: + # If the cert information can't be retrieved, + # add the errors to a list and continue on. 
+ errors.append(e) + c["valid"] = False + else: + nb = cert.get_notBefore() + # strptime's first argument must be str + t = time.strptime(misc.force_str(nb), "%Y%m%d%H%M%SZ") + nb = datetime.datetime.utcfromtimestamp(calendar.timegm(t)) + times["effective"] = nb.strftime("%c") + + na = cert.get_notAfter() + t = time.strptime(misc.force_str(na), "%Y%m%d%H%M%SZ") + na = datetime.datetime.utcfromtimestamp(calendar.timegm(t)) + times["expiration"] = na.strftime("%c") + c["valid"] = True + + return cert_cache[ssl_cert] + + retcode = EXIT_OK + data = {} + if len(pargs) == 0: + if preferred_only: + pref_pub = api_inst.get_highest_ranked_publisher() + if api_inst.has_publisher(pref_pub): + pubs = [pref_pub] + else: + # Only publisher known is from an installed + # package and is not configured in the image. + pubs = [] + else: + pubs = [ + p + for p in api_inst.get_publishers() + if inc_disabled or not p.disabled + ] + # Create a formatting string for the default output + # format + if output_format == "default": + filter_func = filter_default + + # Create a formatting string for the tsv output + # format + if output_format == "tsv": + filter_func = filter_tsv + desired_field_order = ( + _("PUBLISHER"), + "", + _("STICKY"), + _("SYSPUB"), + _("ENABLED"), + _("TYPE"), + _("STATUS"), + _("URI"), + _("PROXY"), + ) + + # Extract our list of headers from the field_data + # dictionary Make sure they are extracted in the + # desired order by using our custom key function. + hdrs = list( + map( + get_header, + sorted( + filter(filter_func, list(field_data.values())), + key=key_fields, + ), + ) + ) + + if not omit_headers: + data["headers"] = hdrs + data["publishers"] = [] + for p in pubs: + # Store all our publisher related data in + # field_data ready for output + + set_value(field_data["publisher"], p.prefix) + # Setup the synthetic attrs field if the + # format is default. + if output_format == "default": + pstatus = "" + + if not p.sticky: + pstatus_list = [_("non-sticky")] else: - state = _("Not installed") - - states = [] - lparen = False - if api.PackageInfo.OBSOLETE in pi.states: - states.append(_("Obsolete")) - elif api.PackageInfo.RENAMED in pi.states: - states.append(_("Renamed")) - elif api.PackageInfo.LEGACY in pi.states: - states.append(_("Legacy")) - if api.PackageInfo.FROZEN in pi.states: - states.append(_("Frozen")) - if api.PackageInfo.MANUAL in pi.states: - states.append(_("Manually installed")) - if len(states): - state += ' ({})'.format(', '.join(states)) - - attr_list = [] - seen = {} - - def __append_attr_lists(label, values): - """Given arguments label and values, either extend - the existing list value or add new one to - attr_list""" - - if not isinstance(values, list): - values = [values] - if label in seen: - seen[label].extend(values) - else: - attr_list.append([label, values]) - seen[label] = values - - __append_attr_lists(_("Name"), pi.pkg_stem) - __append_attr_lists(_("Summary"), pi.summary) - if pi.description: - __append_attr_lists(_("Description"), pi.description) - if pi.category_info_list: - category_info = [] - verbose = len(pi.category_info_list) > 1 - category_info.append \ - (pi.category_info_list[0].__str__(verbose)) - if len(pi.category_info_list) > 1: - for ci in pi.category_info_list[1:]: - category_info.append \ - (ci.__str__(verbose)) - __append_attr_lists(_("Category"), category_info) - - __append_attr_lists(_("State"), state) - - # Renamed packages have dependencies, but the dependencies - # may not apply to this image's variants so won't be - # returned. 
- if api.PackageInfo.RENAMED in pi.states: - __append_attr_lists(_("Renamed to"), pi.dependencies) - - # XXX even more info on the publisher would be nice? - __append_attr_lists(_("Publisher"), pi.publisher) - hum_ver = pi.get_attr_values("pkg.human-version") - if hum_ver and hum_ver[0] != str(pi.version): - __append_attr_lists(_("Version"), "{0} ({1})".format( - pi.version, hum_ver[0])) + pstatus_list = [] + + if p.disabled: + pstatus_list.append(_("disabled")) + if p.sys_pub: + pstatus_list.append(_("syspub")) + if pstatus_list: + pstatus = "({0})".format(", ".join(pstatus_list)) + set_value(field_data["attrs"], pstatus) + + if p.sticky: + set_value(field_data["sticky"], _("true")) + else: + set_value(field_data["sticky"], _("false")) + if not p.disabled: + set_value(field_data["enabled"], _("true")) + else: + set_value(field_data["enabled"], _("false")) + if p.sys_pub: + set_value(field_data["syspub"], _("true")) + else: + set_value(field_data["syspub"], _("false")) + + # Only show the selected repository's information in + # summary view. + if p.repository: + origins = p.repository.origins + mirrors = p.repository.mirrors + else: + origins = mirrors = [] + + set_value(field_data["repo_loc"], "") + set_value(field_data["proxied"], "") + # Update field_data for each origin and output + # a publisher record in our desired format. + for uri in sorted(origins): + # XXX get the real origin status + set_value(field_data["type"], _("origin")) + set_value(field_data["proxy"], "-") + set_value(field_data["proxied"], "F") + + set_value(field_data["uri"], uri) + if uri.disabled: + set_value(field_data["enabled"], _("false")) + set_value(field_data["status"], _("disabled")) else: - __append_attr_lists(_("Version"), str(pi.version)) - - __append_attr_lists(_("Branch"), str(pi.branch)) - __append_attr_lists(_("Packaging Date"), pi.packaging_date) - if pi.last_install: - __append_attr_lists(_("Last Install Time"), - pi.last_install) - if pi.last_update: - __append_attr_lists(_("Last Update Time"), - pi.last_update) - __append_attr_lists(_("Size"), misc.bytes_to_str(pi.size)) - __append_attr_lists(_("FMRI"), - pi.fmri.get_fmri(include_build=False)) - # XXX add license/copyright info here? - - addl_attr_list = { - "info.keyword": _("Additional Keywords"), - "info.upstream": _("Project Contact"), - "info.maintainer": _("Project Maintainer"), - "info.maintainer-url": _("Project Maintainer URL"), - "pkg.detailed-url": _("Project URL"), - "info.upstream-url": _("Project URL"), - "info.repository-changeset": _("Repository Changeset"), - "info.repository-url": _("Source URL"), - "info.source-url": _("Source URL") - } + set_value(field_data["enabled"], _("true")) + set_value(field_data["status"], _("online")) + + if uri.proxies: + set_value(field_data["proxied"], _("T")) + set_value( + field_data["proxy"], + ", ".join([proxy.uri for proxy in uri.proxies]), + ) + if uri.system: + set_value(field_data["repo_loc"], SYSREPO_HIDDEN_URI) + else: + set_value(field_data["repo_loc"], uri) + + values = map( + get_value, + sorted( + filter(filter_func, field_data.values()), key=key_fields + ), + ) + entry = [] + for e in values: + if isinstance(e, six.string_types): + entry.append(e) + else: + entry.append(str(e)) + data["publishers"].append(entry) + # Update field_data for each mirror and output + # a publisher record in our desired format. + for uri in mirrors: + # XXX get the real mirror status + set_value(field_data["type"], _("mirror")) + # We do not currently deal with mirrors. So + # they are always online. 
+ set_value(field_data["status"], _("online")) + set_value(field_data["proxy"], "-") + set_value(field_data["proxied"], _("F")) + + set_value(field_data["uri"], uri) + + if uri.proxies: + set_value(field_data["proxied"], _("T")) + set_value( + field_data["proxy"], + ", ".join([p.uri for p in uri.proxies]), + ) + if uri.system: + set_value(field_data["repo_loc"], SYSREPO_HIDDEN_URI) + else: + set_value(field_data["repo_loc"], uri) + + values = map( + get_value, + sorted( + filter(filter_func, field_data.values()), key=key_fields + ), + ) + entry = [] + for e in values: + if isinstance(e, six.string_types): + entry.append(e) + else: + entry.append(str(e)) + data["publishers"].append(entry) + + if not origins and not mirrors: + set_value(field_data["type"], "") + set_value(field_data["status"], "") + set_value(field_data["uri"], "") + set_value(field_data["proxy"], "") + values = map( + get_value, + sorted( + filter(filter_func, field_data.values()), key=key_fields + ), + ) + entry = [] + for e in values: + if isinstance(e, six.string_types): + entry.append(e) + else: + entry.append(str(e)) + data["publishers"].append(entry) + else: + + def collect_ssl_info(uri, uri_data): + retcode = EXIT_OK + c = get_cert_info(uri.ssl_cert) + uri_data["SSL Key"] = str(uri.ssl_key) + uri_data["SSL Cert"] = str(uri.ssl_cert) + + if not c: + return retcode + + if c["errors"]: + retcode = EXIT_OOPS + + for e in c["errors"]: + errors_json.append( + {"reason": "\n" + str(e) + "\n", "errtype": "cert_info"} + ) + + if c["valid"]: + uri_data["Cert. Effective Date"] = str(c["info"]["effective"]) + uri_data["Cert. Expiration Date"] = str(c["info"]["expiration"]) + return retcode + + def collect_repository(r, pub_data): + retcode = 0 + origins_data = [] + for uri in r.origins: + origin_data = {"Origin URI": str(uri)} + if uri.disabled: + origin_data["Status"] = _("Disabled") + else: + origin_data["Status"] = _("Online") + if uri.proxies: + origin_data["Proxy"] = [str(p.uri) for p in uri.proxies] + rval = collect_ssl_info(uri, origin_data) + if rval == 1: + retcode = EXIT_PARTIAL + origins_data.append(origin_data) + + mirrors_data = [] + for uri in r.mirrors: + mirror_data = {"Mirror URI": str(uri)} + mirror_data["Status"] = _("Online") + if uri.proxies: + mirror_data["Proxy"] = [str(p.uri) for p in uri.proxies] + rval = collect_ssl_info(uri, mirror_data) + if rval == 1: + retcode = EXIT_PARTIAL + mirrors_data.append(mirror_data) + if origins_data: + pub_data["origins"] = origins_data + if mirrors_data: + pub_data["mirrors"] = mirrors_data + return retcode + + def collect_signing_certs(p, pub_data): + if p.approved_ca_certs: + pub_data["Approved CAs"] = [ + str(cert) for cert in p.approved_ca_certs + ] + if p.revoked_ca_certs: + pub_data["Revoked CAs"] = [ + str(cert) for cert in p.revoked_ca_certs + ] - for key in addl_attr_list: - if key in pi.attrs: - __append_attr_lists(addl_attr_list[key], - pi.get_attr_values(key)) + for name in pargs: + # detailed print + pub = api_inst.get_publisher(prefix=name, alias=name) + dt = api_inst.get_publisher_last_update_time(pub.prefix) + if dt: + dt = dt.strftime("%c") + + pub_data = {} + pub_data["Publisher"] = pub.prefix + pub_data["Alias"] = pub.alias + + rval = collect_repository(pub.repository, pub_data) + if rval != 0: + # There was an error in displaying some + # of the information about a repository. + # However, continue on. 
+ retcode = rval + + pub_data["Client UUID"] = pub.client_uuid + pub_data["Catalog Updated"] = dt + collect_signing_certs(pub, pub_data) + if pub.disabled: + pub_data["enabled"] = "No" + else: + pub_data["enabled"] = "Yes" + if pub.sticky: + pub_data["sticky"] = "Yes" + else: + pub_data["sticky"] = "No" + if pub.sys_pub: + pub_data["sys_pub"] = "Yes" + else: + pub_data["sys_pub"] = "No" + if pub.properties: + pub_data["Properties"] = {} + for k, v in six.iteritems(pub.properties): + pub_data["Properties"][k] = v + data.setdefault("publisher_details", []).append(pub_data) + return __prepare_json(retcode, data=data, errors=errors_json, op=op) + + +def _info( + op, + api_inst, + pargs, + display_license, + info_local, + info_remote, + origins, + quiet, +): + """Display information about a package or packages.""" + + errors_json = [] + data = {} + if info_remote and not pargs: + error = { + "reason": _("must request remote info for specific " "packages") + } + errors_json.append(error) + return __prepare_json(EXIT_BADOPT, errors=errors_json, op=op) + + err = EXIT_OK + # Reset the progress tracker here, because we may have to switch to a + # different tracker due to the options parse. + api_inst.progresstracker = _get_tracker() + + api_inst.progresstracker.set_purpose( + api_inst.progresstracker.PURPOSE_LISTING + ) + + info_needed = api.PackageInfo.ALL_OPTIONS + if not display_license: + info_needed = api.PackageInfo.ALL_OPTIONS - frozenset( + [api.PackageInfo.LICENSES] + ) + info_needed -= api.PackageInfo.ACTION_OPTIONS + info_needed |= frozenset([api.PackageInfo.DEPENDENCIES]) + + try: + ret = api_inst.info( + pargs, info_local, info_needed, ranked=info_remote, repos=origins + ) + except api_errors.ImageFormatUpdateNeeded as e: + _format_update_error(e, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.NoPackagesInstalledException: + _error_json(_("no packages installed"), errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.ApiException as e: + _error_json(e, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + pis = ret[api.ImageInterface.INFO_FOUND] + notfound = ret[api.ImageInterface.INFO_MISSING] + illegals = ret[api.ImageInterface.INFO_ILLEGALS] + + if illegals: + # No other results will be returned if illegal patterns were + # specified. 
+ for i in illegals: + errors_json.append({"reason": str(i)}) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + no_licenses = [] + for i, pi in enumerate(pis): + if display_license: + if not pi.licenses: + no_licenses.append(pi.fmri) + elif not quiet: + lics = [] + for lic in pi.licenses: + lics.append(str(lic)) + data.setdefault("licenses", []).append([pi.pkg_stem, lics]) + continue + + if quiet: + continue + + state = "" + if api.PackageInfo.INSTALLED in pi.states: + state = _("Installed") + elif api.PackageInfo.UNSUPPORTED in pi.states: + state = _("Unsupported") + else: + state = _("Not installed") + + states = [] + lparen = False + if api.PackageInfo.OBSOLETE in pi.states: + states.append(_("Obsolete")) + elif api.PackageInfo.RENAMED in pi.states: + states.append(_("Renamed")) + elif api.PackageInfo.LEGACY in pi.states: + states.append(_("Legacy")) + if api.PackageInfo.FROZEN in pi.states: + states.append(_("Frozen")) + if api.PackageInfo.MANUAL in pi.states: + states.append(_("Manually installed")) + if len(states): + state += " ({})".format(", ".join(states)) + + attr_list = [] + seen = {} + + def __append_attr_lists(label, values): + """Given arguments label and values, either extend + the existing list value or add new one to + attr_list""" + + if not isinstance(values, list): + values = [values] + if label in seen: + seen[label].extend(values) + else: + attr_list.append([label, values]) + seen[label] = values + + __append_attr_lists(_("Name"), pi.pkg_stem) + __append_attr_lists(_("Summary"), pi.summary) + if pi.description: + __append_attr_lists(_("Description"), pi.description) + if pi.category_info_list: + category_info = [] + verbose = len(pi.category_info_list) > 1 + category_info.append(pi.category_info_list[0].__str__(verbose)) + if len(pi.category_info_list) > 1: + for ci in pi.category_info_list[1:]: + category_info.append(ci.__str__(verbose)) + __append_attr_lists(_("Category"), category_info) + + __append_attr_lists(_("State"), state) + + # Renamed packages have dependencies, but the dependencies + # may not apply to this image's variants so won't be + # returned. + if api.PackageInfo.RENAMED in pi.states: + __append_attr_lists(_("Renamed to"), pi.dependencies) + + # XXX even more info on the publisher would be nice? + __append_attr_lists(_("Publisher"), pi.publisher) + hum_ver = pi.get_attr_values("pkg.human-version") + if hum_ver and hum_ver[0] != str(pi.version): + __append_attr_lists( + _("Version"), "{0} ({1})".format(pi.version, hum_ver[0]) + ) + else: + __append_attr_lists(_("Version"), str(pi.version)) + + __append_attr_lists(_("Branch"), str(pi.branch)) + __append_attr_lists(_("Packaging Date"), pi.packaging_date) + if pi.last_install: + __append_attr_lists(_("Last Install Time"), pi.last_install) + if pi.last_update: + __append_attr_lists(_("Last Update Time"), pi.last_update) + __append_attr_lists(_("Size"), misc.bytes_to_str(pi.size)) + __append_attr_lists(_("FMRI"), pi.fmri.get_fmri(include_build=False)) + # XXX add license/copyright info here? 
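+
+        # Optional informational attributes: each key below is looked up
+        # in pi.attrs and, when present, reported under the human-readable
+        # label it maps to.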
+ + addl_attr_list = { + "info.keyword": _("Additional Keywords"), + "info.upstream": _("Project Contact"), + "info.maintainer": _("Project Maintainer"), + "info.maintainer-url": _("Project Maintainer URL"), + "pkg.detailed-url": _("Project URL"), + "info.upstream-url": _("Project URL"), + "info.repository-changeset": _("Repository Changeset"), + "info.repository-url": _("Source URL"), + "info.source-url": _("Source URL"), + } - for key in pi.attrs: - if key.startswith('info.source-url.'): - __append_attr_lists(addl_attr_list[key[:15]], - pi.get_attr_values(key)) + for key in addl_attr_list: + if key in pi.attrs: + __append_attr_lists( + addl_attr_list[key], pi.get_attr_values(key) + ) - if "package_attrs" not in data: - data["package_attrs"] = [attr_list] - else: - data["package_attrs"].append(attr_list) - - if notfound: - err_txt = "" - if pis: - err = EXIT_PARTIAL - if not quiet: - err_txt += "\n" - else: - err = EXIT_OOPS - if not quiet: - if info_local: - err_txt += _("""\ + for key in pi.attrs: + if key.startswith("info.source-url."): + __append_attr_lists( + addl_attr_list[key[:15]], pi.get_attr_values(key) + ) + + if "package_attrs" not in data: + data["package_attrs"] = [attr_list] + else: + data["package_attrs"].append(attr_list) + + if notfound: + err_txt = "" + if pis: + err = EXIT_PARTIAL + if not quiet: + err_txt += "\n" + else: + err = EXIT_OOPS + if not quiet: + if info_local: + err_txt += _( + """\ pkg: info: no packages matching the following patterns you specified are -installed on the system. Try querying remotely instead:\n""") - elif info_remote: - err_txt += _("""\ +installed on the system. Try querying remotely instead:\n""" + ) + elif info_remote: + err_txt += _( + """\ pkg: info: no packages matching the following patterns you specified were found in the catalog. Try relaxing the patterns, refreshing, and/or -examining the catalogs:\n""") - err_txt += "\n" - for p in notfound: - err_txt += " {0}".format(p) - errors_json.append({"reason": err_txt, - "errtype": "info_not_found"}) - - if no_licenses: - err_txt = "" - if len(no_licenses) == len(pis): - err = EXIT_OOPS - else: - err = EXIT_PARTIAL +examining the catalogs:\n""" + ) + err_txt += "\n" + for p in notfound: + err_txt += " {0}".format(p) + errors_json.append({"reason": err_txt, "errtype": "info_not_found"}) + + if no_licenses: + err_txt = "" + if len(no_licenses) == len(pis): + err = EXIT_OOPS + else: + err = EXIT_PARTIAL + + if not quiet: + err_txt += _( + "no license information could be found " + "for the following packages:\n" + ) + for pfmri in no_licenses: + err_txt += "\t{0}\n".format(pfmri) + _error_json( + err_txt, errors_json=errors_json, errorType="info_no_licenses" + ) + + return __prepare_json(err, errors=errors_json, data=data) + + +def _verify( + op, + api_inst, + pargs, + omit_headers, + parsable_version, + quiet, + verbose, + unpackaged, + unpackaged_only, + verify_paths, + display_plan_cb=None, + logger=None, +): + """Determine if installed packages match manifests.""" + + errors_json = [] + if pargs and unpackaged_only: + error = { + "reason": _( + "can not report only unpackaged contents " + "with package arguments." 
+ ) + } + errors_json.append(error) + return __prepare_json(EXIT_BADOPT, errors=errors_json) + + return __api_op( + op, + api_inst, + args=pargs, + _noexecute=True, + _omit_headers=omit_headers, + _quiet=quiet, + _quiet_plan=True, + _verbose=verbose, + _parsable_version=parsable_version, + _unpackaged=unpackaged, + _unpackaged_only=unpackaged_only, + _verify_paths=verify_paths, + display_plan_cb=display_plan_cb, + logger=logger, + ) + + +def _fix( + op, + api_inst, + pargs, + accept, + backup_be, + backup_be_name, + be_activate, + be_name, + new_be, + noexecute, + omit_headers, + parsable_version, + quiet, + show_licenses, + verbose, + unpackaged, + display_plan_cb=None, + logger=None, +): + """Fix packaging errors found in the image.""" + + return __api_op( + op, + api_inst, + args=pargs, + _accept=accept, + _noexecute=noexecute, + _omit_headers=omit_headers, + _quiet=quiet, + _show_licenses=show_licenses, + _verbose=verbose, + backup_be=backup_be, + backup_be_name=backup_be_name, + be_activate=be_activate, + be_name=be_name, + new_be=new_be, + _parsable_version=parsable_version, + _unpackaged=unpackaged, + display_plan_cb=display_plan_cb, + logger=logger, + ) - if not quiet: - err_txt += _("no license information could be found " - "for the following packages:\n") - for pfmri in no_licenses: - err_txt += "\t{0}\n".format(pfmri) - _error_json(err_txt, errors_json=errors_json, - errorType="info_no_licenses") - - return __prepare_json(err, errors=errors_json, data=data) - -def _verify(op, api_inst, pargs, omit_headers, parsable_version, quiet, verbose, - unpackaged, unpackaged_only, verify_paths, display_plan_cb=None, logger=None): - """Determine if installed packages match manifests.""" - - errors_json = [] - if pargs and unpackaged_only: - error = {"reason": _("can not report only unpackaged contents " - "with package arguments.")} - errors_json.append(error) - return __prepare_json(EXIT_BADOPT, errors=errors_json) - - return __api_op(op, api_inst, args=pargs, _noexecute=True, - _omit_headers=omit_headers, _quiet=quiet, _quiet_plan=True, - _verbose=verbose, _parsable_version=parsable_version, - _unpackaged=unpackaged, _unpackaged_only=unpackaged_only, - _verify_paths=verify_paths, display_plan_cb=display_plan_cb, - logger=logger) - -def _fix(op, api_inst, pargs, accept, backup_be, backup_be_name, be_activate, - be_name, new_be, noexecute, omit_headers, parsable_version, quiet, - show_licenses, verbose, unpackaged, display_plan_cb=None, logger=None): - """Fix packaging errors found in the image.""" - - return __api_op(op, api_inst, args=pargs, _accept=accept, - _noexecute=noexecute, _omit_headers=omit_headers, _quiet=quiet, - _show_licenses=show_licenses, _verbose=verbose, backup_be=backup_be, - backup_be_name=backup_be_name, be_activate=be_activate, - be_name=be_name, new_be=new_be, _parsable_version=parsable_version, - _unpackaged=unpackaged, display_plan_cb=display_plan_cb, - logger=logger) def __refresh(api_inst, pubs, full_refresh=False): - """Private helper method for refreshing publisher data.""" + """Private helper method for refreshing publisher data.""" + + errors_json = [] + try: + # The user explicitly requested this refresh, so set the + # refresh to occur immediately. 
+ api_inst.refresh(full_refresh=full_refresh, immediate=True, pubs=pubs) + except api_errors.ImageFormatUpdateNeeded as e: + _format_update_error(e, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.PublisherError as e: + _error_json(e, errors_json=errors_json) + _error_json( + _("'pkg publisher' will show a list of publishers."), + errors_json=errors_json, + ) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except (api_errors.UnknownErrors, api_errors.PermissionsException) as e: + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + _error_json("\n" + str(e), errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.CatalogRefreshException as e: + if _collect_catalog_failures(e, errors=errors_json) == 0: + return __prepare_json(EXIT_OOPS, errors=errors_json) + return __prepare_json(EXIT_PARTIAL, errors=errors_json) + return __prepare_json(EXIT_OK) - errors_json = [] - try: - # The user explicitly requested this refresh, so set the - # refresh to occur immediately. - api_inst.refresh(full_refresh=full_refresh, - immediate=True, pubs=pubs) - except api_errors.ImageFormatUpdateNeeded as e: - _format_update_error(e, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.PublisherError as e: - _error_json(e, errors_json=errors_json) - _error_json(_("'pkg publisher' will show a list of publishers." - ), errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except (api_errors.UnknownErrors, api_errors.PermissionsException) as e: - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. - _error_json("\n" + str(e), errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.CatalogRefreshException as e: - if _collect_catalog_failures(e, errors=errors_json) == 0: - return __prepare_json(EXIT_OOPS, errors=errors_json) - return __prepare_json(EXIT_PARTIAL, errors=errors_json) - return __prepare_json(EXIT_OK) def _get_ssl_cert_key(root, is_zone, ssl_cert, ssl_key): - if ssl_cert is not None or ssl_key is not None: - # In the case of zones, the ssl cert given is assumed to - # be relative to the root of the image, not truly absolute. - orig_cwd = _get_orig_cwd() - if is_zone: - if ssl_cert is not None: - ssl_cert = os.path.abspath( - root + os.sep + ssl_cert) - if ssl_key is not None: - ssl_key = os.path.abspath( - root + os.sep + ssl_key) - elif orig_cwd: - if ssl_cert and not os.path.isabs(ssl_cert): - ssl_cert = os.path.normpath(os.path.join( - orig_cwd, ssl_cert)) - if ssl_key and not os.path.isabs(ssl_key): - ssl_key = os.path.normpath(os.path.join( - orig_cwd, ssl_key)) - return ssl_cert, ssl_key - -def _set_pub_error_wrap(func, pfx, raise_errors, *args, **kwargs): - """Helper function to wrap set-publisher private methods. Returns - a tuple of (return value, message). Callers should check the return - value for errors.""" + if ssl_cert is not None or ssl_key is not None: + # In the case of zones, the ssl cert given is assumed to + # be relative to the root of the image, not truly absolute. 
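+        # For non-zone images, any relative cert/key path is instead
+        # resolved against the caller's original working directory.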
+ orig_cwd = _get_orig_cwd() + if is_zone: + if ssl_cert is not None: + ssl_cert = os.path.abspath(root + os.sep + ssl_cert) + if ssl_key is not None: + ssl_key = os.path.abspath(root + os.sep + ssl_key) + elif orig_cwd: + if ssl_cert and not os.path.isabs(ssl_cert): + ssl_cert = os.path.normpath(os.path.join(orig_cwd, ssl_cert)) + if ssl_key and not os.path.isabs(ssl_key): + ssl_key = os.path.normpath(os.path.join(orig_cwd, ssl_key)) + return ssl_cert, ssl_key - errors_json = [] - try: - return func(*args, **kwargs) - except api_errors.CatalogRefreshException as e: - for entry in raise_errors: - if isinstance(e, entry): - raise - succeeded = _collect_catalog_failures(e, - ignore_perms_failure=True, errors=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.InvalidDepotResponseException as e: - for entry in raise_errors: - if isinstance(e, entry): - raise - if pfx: - errors_json.append({"reason": _("The origin URIs for " - "'{pubname}' do not appear to point to a valid " - "pkg repository.\nPlease verify the repository's " - "location and the client's network configuration." - "\nAdditional details:\n\n{details}").format( - pubname=pfx, details=str(e))}) - return __prepare_json(EXIT_OOPS, errors=errors_json) - errors_json.append({"reason": _("The specified URI does " +def _set_pub_error_wrap(func, pfx, raise_errors, *args, **kwargs): + """Helper function to wrap set-publisher private methods. Returns + a tuple of (return value, message). Callers should check the return + value for errors.""" + + errors_json = [] + try: + return func(*args, **kwargs) + except api_errors.CatalogRefreshException as e: + for entry in raise_errors: + if isinstance(e, entry): + raise + succeeded = _collect_catalog_failures( + e, ignore_perms_failure=True, errors=errors_json + ) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + except api_errors.InvalidDepotResponseException as e: + for entry in raise_errors: + if isinstance(e, entry): + raise + if pfx: + errors_json.append( + { + "reason": _( + "The origin URIs for " + "'{pubname}' do not appear to point to a valid " + "pkg repository.\nPlease verify the repository's " + "location and the client's network configuration." + "\nAdditional details:\n\n{details}" + ).format(pubname=pfx, details=str(e)) + } + ) + return __prepare_json(EXIT_OOPS, errors=errors_json) + errors_json.append( + { + "reason": _( + "The specified URI does " "not appear to point to a valid pkg repository.\nPlease " "check the URI and the client's network configuration." - "\nAdditional details:\n\n{0}").format(str(e))}) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.ImageFormatUpdateNeeded as e: - for entry in raise_errors: - if isinstance(e, entry): - raise - _format_update_error(e, errors_json=errors_json) - return __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.ApiException as e: - for entry in raise_errors: - if isinstance(e, entry): - raise - # Prepend a newline because otherwise the exception will - # be printed on the same line as the spinner. 
- errors_json.append({"reason": ("\n" + str(e))}) + "\nAdditional details:\n\n{0}" + ).format(str(e)) + } + ) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.ImageFormatUpdateNeeded as e: + for entry in raise_errors: + if isinstance(e, entry): + raise + _format_update_error(e, errors_json=errors_json) + return __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.ApiException as e: + for entry in raise_errors: + if isinstance(e, entry): + raise + # Prepend a newline because otherwise the exception will + # be printed on the same line as the spinner. + errors_json.append({"reason": ("\n" + str(e))}) + return __prepare_json(EXIT_OOPS, errors=errors_json) + + +def _add_update_pub( + api_inst, + prefix, + pub=None, + disable=None, + sticky=None, + origin_uri=None, + add_mirrors=EmptyI, + remove_mirrors=EmptyI, + add_origins=EmptyI, + remove_origins=EmptyI, + enable_origins=EmptyI, + disable_origins=EmptyI, + ssl_cert=None, + ssl_key=None, + search_before=None, + search_after=None, + search_first=False, + reset_uuid=None, + refresh_allowed=False, + set_props=EmptyI, + add_prop_values=EmptyI, + remove_prop_values=EmptyI, + unset_props=EmptyI, + approved_cas=EmptyI, + revoked_cas=EmptyI, + unset_cas=EmptyI, + proxy_uri=None, +): + repo = None + new_pub = False + errors_json = [] + if not pub: + try: + pub = api_inst.get_publisher( + prefix=prefix, alias=prefix, duplicate=True + ) + if reset_uuid: + pub.reset_client_uuid() + repo = pub.repository + except api_errors.UnknownPublisher as e: + if ( + not origin_uri + and not add_origins + and ( + remove_origins + or remove_mirrors + or remove_prop_values + or add_mirrors + or enable_origins + or disable_origins + ) + ): + errors_json.append({"reason": str(e)}) return __prepare_json(EXIT_OOPS, errors=errors_json) -def _add_update_pub(api_inst, prefix, pub=None, disable=None, sticky=None, - origin_uri=None, add_mirrors=EmptyI, remove_mirrors=EmptyI, - add_origins=EmptyI, remove_origins=EmptyI, enable_origins=EmptyI, - disable_origins=EmptyI, ssl_cert=None, ssl_key=None, - search_before=None, search_after=None, search_first=False, - reset_uuid=None, refresh_allowed=False, - set_props=EmptyI, add_prop_values=EmptyI, - remove_prop_values=EmptyI, unset_props=EmptyI, approved_cas=EmptyI, - revoked_cas=EmptyI, unset_cas=EmptyI, proxy_uri=None): - - repo = None - new_pub = False - errors_json = [] - if not pub: - try: - pub = api_inst.get_publisher(prefix=prefix, - alias=prefix, duplicate=True) - if reset_uuid: - pub.reset_client_uuid() - repo = pub.repository - except api_errors.UnknownPublisher as e: - if not origin_uri and not add_origins and \ - (remove_origins or remove_mirrors or - remove_prop_values or add_mirrors or - enable_origins or disable_origins): - errors_json.append({"reason": str(e)}) - return __prepare_json(EXIT_OOPS, - errors=errors_json) - - # No pre-existing, so create a new one. - repo = publisher.Repository() - pub = publisher.Publisher(prefix, repository=repo) - new_pub = True - elif not api_inst.has_publisher(prefix=pub.prefix): - new_pub = True + # No pre-existing, so create a new one. + repo = publisher.Repository() + pub = publisher.Publisher(prefix, repository=repo) + new_pub = True + elif not api_inst.has_publisher(prefix=pub.prefix): + new_pub = True + if not repo: + repo = pub.repository if not repo: - repo = pub.repository - if not repo: - # Could be a new publisher from auto-configuration - # case where no origin was provided in repository - # configuration. 
- repo = publisher.Repository() - pub.repository = repo - - if sticky is not None: - # Set stickiness only if provided - pub.sticky = sticky - - if proxy_uri: - # we only support a single proxy for now. - proxies = [publisher.ProxyURI(proxy_uri)] + # Could be a new publisher from auto-configuration + # case where no origin was provided in repository + # configuration. + repo = publisher.Repository() + pub.repository = repo + + if sticky is not None: + # Set stickiness only if provided + pub.sticky = sticky + + if proxy_uri: + # we only support a single proxy for now. + proxies = [publisher.ProxyURI(proxy_uri)] + else: + proxies = [] + + if origin_uri: + # For compatibility with old -O behaviour, treat -O as a wipe + # of existing origins and add the new one. + + origin_uri = misc.parse_uri(origin_uri, cwd=_get_orig_cwd()) + + # Only use existing cert information if the new URI uses + # https for transport. + if ( + repo.origins + and not (ssl_cert or ssl_key) + and any( + origin_uri.startswith(scheme + ":") + for scheme in publisher.SSL_SCHEMES + ) + ): + for uri in repo.origins: + if ssl_cert is None: + ssl_cert = uri.ssl_cert + if ssl_key is None: + ssl_key = uri.ssl_key + break + + repo.reset_origins() + o = publisher.RepositoryURI(origin_uri, proxies=proxies) + repo.add_origin(o) + + # XXX once image configuration supports storing this + # information at the uri level, ssl info should be set + # here. + + for entry in ( + ("mirror", add_mirrors, remove_mirrors), + ("origin", add_origins, remove_origins), + ): + etype, add, remove = entry + # XXX once image configuration supports storing this + # information at the uri level, ssl info should be set + # here. + if "*" in remove: + getattr(repo, "reset_{0}s".format(etype))() + else: + for u in remove: + getattr(repo, "remove_{0}".format(etype))(u) + + for u in add: + uri = publisher.RepositoryURI(u, proxies=proxies) + try: + getattr(repo, "add_{0}".format(etype))(uri) + except ( + api_errors.DuplicateSyspubOrigin, + api_errors.DuplicateRepositoryOrigin, + ): + # If this exception occurs, we know the + # origin already exists. Then if it is + # combined with --enable or --disable, + # we turn it into an update task for the + # origin. Otherwise, raise the exception + # again. + if not (disable_origins or enable_origins): + raise + + if disable is not None: + # Set disabled property only if provided. + # If "*" in enable or disable origins list or disable without + # enable or disable origins specified, then it is a publisher + # level disable. + if not (enable_origins or disable_origins): + pub.disabled = disable else: - proxies = [] - - if origin_uri: - # For compatibility with old -O behaviour, treat -O as a wipe - # of existing origins and add the new one. - - origin_uri = misc.parse_uri(origin_uri, cwd=_get_orig_cwd()) - - # Only use existing cert information if the new URI uses - # https for transport. - if repo.origins and not (ssl_cert or ssl_key) and \ - any(origin_uri.startswith(scheme + ":") - for scheme in publisher.SSL_SCHEMES): - - for uri in repo.origins: - if ssl_cert is None: - ssl_cert = uri.ssl_cert - if ssl_key is None: - ssl_key = uri.ssl_key - break - - repo.reset_origins() - o = publisher.RepositoryURI(origin_uri, proxies=proxies) - repo.add_origin(o) - - # XXX once image configuration supports storing this - # information at the uri level, ssl info should be set - # here. 
- - for entry in (("mirror", add_mirrors, remove_mirrors), ("origin", - add_origins, remove_origins)): - etype, add, remove = entry - # XXX once image configuration supports storing this - # information at the uri level, ssl info should be set - # here. - if "*" in remove: - getattr(repo, "reset_{0}s".format(etype))() + if disable_origins: + if "*" in disable_origins: + for o in repo.origins: + o.disabled = True else: - for u in remove: - getattr(repo, "remove_{0}".format(etype))(u) - - for u in add: - uri = publisher.RepositoryURI(u, proxies=proxies) - try: - getattr(repo, "add_{0}".format(etype) - )(uri) - except (api_errors.DuplicateSyspubOrigin, - api_errors.DuplicateRepositoryOrigin): - # If this exception occurs, we know the - # origin already exists. Then if it is - # combined with --enable or --disable, - # we turn it into an update task for the - # origin. Otherwise, raise the exception - # again. - if not (disable_origins or enable_origins): - raise - - if disable is not None: - # Set disabled property only if provided. - # If "*" in enable or disable origins list or disable without - # enable or disable origins specified, then it is a publisher - # level disable. - if not (enable_origins or disable_origins): - pub.disabled = disable + for diso in disable_origins: + ori = repo.get_origin(diso) + ori.disabled = True + if enable_origins: + if "*" in enable_origins: + for o in repo.origins: + o.disabled = False else: - if disable_origins: - if "*" in disable_origins: - for o in repo.origins: - o.disabled = True - else: - for diso in disable_origins: - ori = repo.get_origin(diso) - ori.disabled = True - if enable_origins: - if "*" in enable_origins: - for o in repo.origins: - o.disabled = False - else: - for eno in enable_origins: - ori = repo.get_origin(eno) - ori.disabled = False - - # None is checked for here so that a client can unset a ssl_cert or - # ssl_key by using -k "" or -c "". - if ssl_cert is not None or ssl_key is not None: - # Assume the user wanted to update the ssl_cert or ssl_key - # information for *all* of the currently selected - # repository's origins and mirrors that use SSL schemes. - found_ssl = False - for uri in repo.origins: - if uri.scheme not in publisher.SSL_SCHEMES: - continue - found_ssl = True - if ssl_cert is not None: - uri.ssl_cert = ssl_cert - if ssl_key is not None: - uri.ssl_key = ssl_key - for uri in repo.mirrors: - if uri.scheme not in publisher.SSL_SCHEMES: - continue - found_ssl = True - if ssl_cert is not None: - uri.ssl_cert = ssl_cert - if ssl_key is not None: - uri.ssl_key = ssl_key - - if (ssl_cert or ssl_key) and not found_ssl: - # None of the origins or mirrors for the publisher - # use SSL schemes so the cert and key information - # won't be retained. - errors_json.append({"reason": _("Publisher '{0}' does " - "not have any SSL-based origins or mirrors." 
- ).format(prefix)}) - return __prepare_json(EXIT_BADOPT, errors=errors_json) - - if set_props or add_prop_values or remove_prop_values or unset_props: - pub.update_props(set_props=set_props, - add_prop_values=add_prop_values, - remove_prop_values=remove_prop_values, - unset_props=unset_props) - - if new_pub: - api_inst.add_publisher(pub, - refresh_allowed=refresh_allowed, approved_cas=approved_cas, - revoked_cas=revoked_cas, unset_cas=unset_cas, - search_after=search_after, search_before=search_before, - search_first=search_first) - else: - for ca in approved_cas: - try: - ca = os.path.normpath( - os.path.join(_get_orig_cwd(), ca)) - with open(ca, "rb") as fh: - s = fh.read() - except EnvironmentError as e: - if e.errno == errno.ENOENT: - raise api_errors.MissingFileArgumentException( - ca) - elif e.errno == errno.EACCES: - raise api_errors.PermissionsException( - ca) - raise - pub.approve_ca_cert(s) - - for hsh in revoked_cas: - pub.revoke_ca_cert(hsh) - - for hsh in unset_cas: - pub.unset_ca_cert(hsh) - - api_inst.update_publisher(pub, - refresh_allowed=refresh_allowed, search_after=search_after, - search_before=search_before, search_first=search_first) - - return __prepare_json(EXIT_OK) + for eno in enable_origins: + ori = repo.get_origin(eno) + ori.disabled = False + + # None is checked for here so that a client can unset a ssl_cert or + # ssl_key by using -k "" or -c "". + if ssl_cert is not None or ssl_key is not None: + # Assume the user wanted to update the ssl_cert or ssl_key + # information for *all* of the currently selected + # repository's origins and mirrors that use SSL schemes. + found_ssl = False + for uri in repo.origins: + if uri.scheme not in publisher.SSL_SCHEMES: + continue + found_ssl = True + if ssl_cert is not None: + uri.ssl_cert = ssl_cert + if ssl_key is not None: + uri.ssl_key = ssl_key + for uri in repo.mirrors: + if uri.scheme not in publisher.SSL_SCHEMES: + continue + found_ssl = True + if ssl_cert is not None: + uri.ssl_cert = ssl_cert + if ssl_key is not None: + uri.ssl_key = ssl_key + + if (ssl_cert or ssl_key) and not found_ssl: + # None of the origins or mirrors for the publisher + # use SSL schemes so the cert and key information + # won't be retained. + errors_json.append( + { + "reason": _( + "Publisher '{0}' does " + "not have any SSL-based origins or mirrors." 
+ ).format(prefix) + } + ) + return __prepare_json(EXIT_BADOPT, errors=errors_json) + + if set_props or add_prop_values or remove_prop_values or unset_props: + pub.update_props( + set_props=set_props, + add_prop_values=add_prop_values, + remove_prop_values=remove_prop_values, + unset_props=unset_props, + ) + + if new_pub: + api_inst.add_publisher( + pub, + refresh_allowed=refresh_allowed, + approved_cas=approved_cas, + revoked_cas=revoked_cas, + unset_cas=unset_cas, + search_after=search_after, + search_before=search_before, + search_first=search_first, + ) + else: + for ca in approved_cas: + try: + ca = os.path.normpath(os.path.join(_get_orig_cwd(), ca)) + with open(ca, "rb") as fh: + s = fh.read() + except EnvironmentError as e: + if e.errno == errno.ENOENT: + raise api_errors.MissingFileArgumentException(ca) + elif e.errno == errno.EACCES: + raise api_errors.PermissionsException(ca) + raise + pub.approve_ca_cert(s) -def _get_orig_cwd(): - """Get the original current working directory.""" - try: - orig_cwd = os.getcwd() - except OSError as e: - try: - orig_cwd = os.environ["PWD"] - if not orig_cwd or orig_cwd[0] != "/": - orig_cwd = None - except KeyError: - orig_cwd = None - return orig_cwd - -def __pkg(subcommand, pargs_json, opts_json, pkg_image=None, - prog_delay=PROG_DELAY, prog_tracker=None, opts_mapping=misc.EmptyDict, - api_inst=None): - """Private function to invoke pkg subcommands.""" - - errors_json = [] - if subcommand is None: - err = {"reason": "Sub-command cannot be none type."} - errors_json.append(err) - return None, __prepare_json(EXIT_OOPS, errors=errors_json) - if subcommand not in cmds: - err = {"reason": "Unknown sub-command: {0}.".format( - subcommand)} - errors_json.append(err) - return None, __prepare_json(EXIT_OOPS, errors=errors_json) + for hsh in revoked_cas: + pub.revoke_ca_cert(hsh) - arg_name = "pargs_json" - try: - if pargs_json is None: - pargs = [] - # Pargs_json is already a list, use it. - elif isinstance(pargs_json, list): - pargs = pargs_json - else: - pargs = json.loads(pargs_json) - if not isinstance(pargs, list): - if not isinstance(pargs, six.string_types): - err = {"reason": "{0} is invalid.".format( - arg_name)} - errors_json.append(err) - return None, __prepare_json(EXIT_OOPS, - errors=errors_json) - misc.force_str(pargs) - pargs = [pargs] - else: - for idx in range(len(pargs)): - misc.force_str(pargs[idx]) - except Exception as e: - err = {"reason": "{0} is invalid.".format( - arg_name)} - errors_json.append(err) - return None, __prepare_json(EXIT_OOPS, errors=errors_json) + for hsh in unset_cas: + pub.unset_ca_cert(hsh) - try: - if opts_json is None: - opts = {} - # If opts_json is already a dict, use it. - elif isinstance(opts_json, dict): - opts = opts_json - else: - opts = json.loads(opts_json, object_hook=_strify) - if not isinstance(opts, dict): - err = {"reason": "opts_json is invalid."} - errors_json.append(err) - return None, __prepare_json(EXIT_OOPS, - errors=errors_json) - except: - err = {"reason": "opts_json is invalid."} - errors_json.append(err) - return None, __prepare_json(EXIT_OOPS, errors=errors_json) + api_inst.update_publisher( + pub, + refresh_allowed=refresh_allowed, + search_after=search_after, + search_before=search_before, + search_first=search_first, + ) - try: - # Validate JSON input with JSON schema. 
- input_schema = _get_pkg_input_schema(subcommand, - opts_mapping=opts_mapping) - json.validate({arg_name: pargs, "opts_json": opts}, - input_schema) - except json.ValidationError as e: - return None, __prepare_json(EXIT_BADOPT, - errors=[{"reason": str(e)}]) + return __prepare_json(EXIT_OK) - orig_cwd = _get_orig_cwd() - # Get ImageInterface and image object. - if not api_inst: - api_inst = __api_alloc(pkg_image, orig_cwd, - prog_delay=prog_delay, prog_tracker=prog_tracker, - errors_json=errors_json) - if api_inst is None: +def _get_orig_cwd(): + """Get the original current working directory.""" + try: + orig_cwd = os.getcwd() + except OSError as e: + try: + orig_cwd = os.environ["PWD"] + if not orig_cwd or orig_cwd[0] != "/": + orig_cwd = None + except KeyError: + orig_cwd = None + return orig_cwd + + +def __pkg( + subcommand, + pargs_json, + opts_json, + pkg_image=None, + prog_delay=PROG_DELAY, + prog_tracker=None, + opts_mapping=misc.EmptyDict, + api_inst=None, +): + """Private function to invoke pkg subcommands.""" + + errors_json = [] + if subcommand is None: + err = {"reason": "Sub-command cannot be none type."} + errors_json.append(err) + return None, __prepare_json(EXIT_OOPS, errors=errors_json) + if subcommand not in cmds: + err = {"reason": "Unknown sub-command: {0}.".format(subcommand)} + errors_json.append(err) + return None, __prepare_json(EXIT_OOPS, errors=errors_json) + + arg_name = "pargs_json" + try: + if pargs_json is None: + pargs = [] + # Pargs_json is already a list, use it. + elif isinstance(pargs_json, list): + pargs = pargs_json + else: + pargs = json.loads(pargs_json) + if not isinstance(pargs, list): + if not isinstance(pargs, six.string_types): + err = {"reason": "{0} is invalid.".format(arg_name)} + errors_json.append(err) return None, __prepare_json(EXIT_OOPS, errors=errors_json) - - func = cmds[subcommand][0] - # Get the available options for the requested operation to create the - # getopt parsing strings. - valid_opts = options.get_pkg_opts(subcommand, add_table=cmd_opts) - pargs_limit = None - if len(cmds[subcommand]) > 2: - pargs_limit = cmds[subcommand][2] - - if not valid_opts: - # if there are no options for an op, it has its own processing. - try: - if subcommand in ["unset-publisher"]: - return api_inst, func(subcommand, api_inst, pargs, - **opts) - else: - return api_inst, func(api_inst, pargs, **opts) - except getopt.GetoptError as e: - err = {"reason": str(e)} - return api_inst, __prepare_json(EXIT_OOPS, errors=err) + misc.force_str(pargs) + pargs = [pargs] + else: + for idx in range(len(pargs)): + misc.force_str(pargs[idx]) + except Exception as e: + err = {"reason": "{0} is invalid.".format(arg_name)} + errors_json.append(err) + return None, __prepare_json(EXIT_OOPS, errors=errors_json) + + try: + if opts_json is None: + opts = {} + # If opts_json is already a dict, use it. + elif isinstance(opts_json, dict): + opts = opts_json + else: + opts = json.loads(opts_json, object_hook=_strify) + if not isinstance(opts, dict): + err = {"reason": "opts_json is invalid."} + errors_json.append(err) + return None, __prepare_json(EXIT_OOPS, errors=errors_json) + except: + err = {"reason": "opts_json is invalid."} + errors_json.append(err) + return None, __prepare_json(EXIT_OOPS, errors=errors_json) + + try: + # Validate JSON input with JSON schema. 
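+        # A schema violation returns EXIT_BADOPT here, before the image
+        # is allocated and before the subcommand handler is invoked.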
+ input_schema = _get_pkg_input_schema( + subcommand, opts_mapping=opts_mapping + ) + json.validate({arg_name: pargs, "opts_json": opts}, input_schema) + except json.ValidationError as e: + return None, __prepare_json(EXIT_BADOPT, errors=[{"reason": str(e)}]) + + orig_cwd = _get_orig_cwd() + + # Get ImageInterface and image object. + if not api_inst: + api_inst = __api_alloc( + pkg_image, + orig_cwd, + prog_delay=prog_delay, + prog_tracker=prog_tracker, + errors_json=errors_json, + ) + if api_inst is None: + return None, __prepare_json(EXIT_OOPS, errors=errors_json) + + func = cmds[subcommand][0] + # Get the available options for the requested operation to create the + # getopt parsing strings. + valid_opts = options.get_pkg_opts(subcommand, add_table=cmd_opts) + pargs_limit = None + if len(cmds[subcommand]) > 2: + pargs_limit = cmds[subcommand][2] + + if not valid_opts: + # if there are no options for an op, it has its own processing. try: - opt_dict = misc.opts_parse(subcommand, [], - valid_opts, opts_mapping, use_cli_opts=False, **opts) - if pargs_limit is not None and len(pargs) > pargs_limit: - err = {"reason": _("illegal argument -- {0}").format( - pargs[pargs_limit])} - return api_inst, __prepare_json(EXIT_OOPS, errors=err) - opts = options.opts_assemble(subcommand, api_inst, opt_dict, - add_table=cmd_opts, cwd=orig_cwd) - except api_errors.InvalidOptionError as e: - # We can't use the string representation of the exception since - # it references internal option names. We substitute the RAD - # options and create a new exception to make sure the messages - # are correct. - - # Convert the internal options to RAD options. We make sure that - # when there is a short and a long version for the same option - # we print both to avoid confusion. - def get_cli_opt(option): - try: - option_name = None - if option in opts_mapping: - option_name = opts_mapping[option] - - if option_name: - return option_name - else: - return option - except KeyError: - # ignore if we can't find a match - # (happens for repeated arguments or invalid - # arguments) - return option - except TypeError: - # ignore if we can't find a match - # (happens for an invalid arguments list) - return option - cli_opts = [] - opt_def = [] - - for o in e.options: - cli_opts.append(get_cli_opt(o)) - - # collect the default value (see comment below) - opt_def.append(options.get_pkg_opts_defaults(subcommand, - o, add_table=cmd_opts)) - - # Prepare for headache: - # If we have an option 'b' which is set to True by default it - # will be toggled to False if the users specifies the according - # option on the CLI. - # If we now have an option 'a' which requires option 'b' to be - # set, we can't say "'a' requires 'b'" because the user can only - # specify 'not b'. So the correct message would be: - # "'a' is incompatible with 'not b'". - # We can get there by just changing the type of the exception - # for all cases where the default value of one of the options is - # True. - if e.err_type == api_errors.InvalidOptionError.REQUIRED: - if len(opt_def) == 2 and (opt_def[0] or opt_def[1]): - e.err_type = \ - api_errors.InvalidOptionError.INCOMPAT - - # This new exception will have the CLI options, so can be passed - # directly to usage(). 
- new_e = api_errors.InvalidOptionError(err_type=e.err_type, - options=cli_opts, msg=e.msg) - err = {"reason": str(new_e)} - return api_inst, __prepare_json(EXIT_BADOPT, errors=err) - return api_inst, func(op=subcommand, api_inst=api_inst, - pargs=pargs, **opts) - -def __handle_errors_json(func, non_wrap_print=True, subcommand=None, - pargs_json=None, opts_json=None, pkg_image=None, - prog_delay=PROG_DELAY, prog_tracker=None, opts_mapping=misc.EmptyDict, - api_inst=None, reset_api=False): - """Error handling for pkg subcommands.""" - - traceback_str = misc.get_traceback_message() - errors_json = [] - - _api_inst = None + if subcommand in ["unset-publisher"]: + return api_inst, func(subcommand, api_inst, pargs, **opts) + else: + return api_inst, func(api_inst, pargs, **opts) + except getopt.GetoptError as e: + err = {"reason": str(e)} + return api_inst, __prepare_json(EXIT_OOPS, errors=err) + try: + opt_dict = misc.opts_parse( + subcommand, [], valid_opts, opts_mapping, use_cli_opts=False, **opts + ) + if pargs_limit is not None and len(pargs) > pargs_limit: + err = { + "reason": _("illegal argument -- {0}").format( + pargs[pargs_limit] + ) + } + return api_inst, __prepare_json(EXIT_OOPS, errors=err) + opts = options.opts_assemble( + subcommand, api_inst, opt_dict, add_table=cmd_opts, cwd=orig_cwd + ) + except api_errors.InvalidOptionError as e: + # We can't use the string representation of the exception since + # it references internal option names. We substitute the RAD + # options and create a new exception to make sure the messages + # are correct. + + # Convert the internal options to RAD options. We make sure that + # when there is a short and a long version for the same option + # we print both to avoid confusion. + def get_cli_opt(option): + try: + option_name = None + if option in opts_mapping: + option_name = opts_mapping[option] + + if option_name: + return option_name + else: + return option + except KeyError: + # ignore if we can't find a match + # (happens for repeated arguments or invalid + # arguments) + return option + except TypeError: + # ignore if we can't find a match + # (happens for an invalid arguments list) + return option + + cli_opts = [] + opt_def = [] + + for o in e.options: + cli_opts.append(get_cli_opt(o)) + + # collect the default value (see comment below) + opt_def.append( + options.get_pkg_opts_defaults(subcommand, o, add_table=cmd_opts) + ) + + # Prepare for headache: + # If we have an option 'b' which is set to True by default it + # will be toggled to False if the users specifies the according + # option on the CLI. + # If we now have an option 'a' which requires option 'b' to be + # set, we can't say "'a' requires 'b'" because the user can only + # specify 'not b'. So the correct message would be: + # "'a' is incompatible with 'not b'". + # We can get there by just changing the type of the exception + # for all cases where the default value of one of the options is + # True. + if e.err_type == api_errors.InvalidOptionError.REQUIRED: + if len(opt_def) == 2 and (opt_def[0] or opt_def[1]): + e.err_type = api_errors.InvalidOptionError.INCOMPAT + + # This new exception will have the CLI options, so can be passed + # directly to usage(). 
+ new_e = api_errors.InvalidOptionError( + err_type=e.err_type, options=cli_opts, msg=e.msg + ) + err = {"reason": str(new_e)} + return api_inst, __prepare_json(EXIT_BADOPT, errors=err) + return api_inst, func(op=subcommand, api_inst=api_inst, pargs=pargs, **opts) + + +def __handle_errors_json( + func, + non_wrap_print=True, + subcommand=None, + pargs_json=None, + opts_json=None, + pkg_image=None, + prog_delay=PROG_DELAY, + prog_tracker=None, + opts_mapping=misc.EmptyDict, + api_inst=None, + reset_api=False, +): + """Error handling for pkg subcommands.""" + + traceback_str = misc.get_traceback_message() + errors_json = [] + + _api_inst = None + try: + # Out of memory errors can be raised as EnvironmentErrors with + # an errno of ENOMEM, so in order to handle those exceptions + # with other errnos, we nest this try block and have the outer + # one handle the other instances. try: - # Out of memory errors can be raised as EnvironmentErrors with - # an errno of ENOMEM, so in order to handle those exceptions - # with other errnos, we nest this try block and have the outer - # one handle the other instances. - try: - if non_wrap_print: - _api_inst, ret_json = func(subcommand, pargs_json, - opts_json, pkg_image=pkg_image, - prog_delay=prog_delay, - prog_tracker=prog_tracker, - opts_mapping=opts_mapping, - api_inst=api_inst) - else: - func() - except (MemoryError, EnvironmentError) as __e: - if isinstance(__e, EnvironmentError) and \ - __e.errno != errno.ENOMEM: - raise - if _api_inst: - _api_inst.abort( - result=RESULT_FAILED_OUTOFMEMORY) - _error_json(misc.out_of_memory(), - errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, - errors=errors_json) - except SystemExit as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_UNKNOWN) - raise __e - except (PipeError, KeyboardInterrupt): - if _api_inst: - _api_inst.abort(result=RESULT_CANCELED) - # We don't want to display any messages here to prevent - # possible further broken pipe (EPIPE) errors. 
- ret_json = __prepare_json(EXIT_OOPS) - except api_errors.LinkedImageException as __e: - _error_json(_("Linked image exception(s):\n{0}").format( - str(__e)), errors_json=errors_json) - ret_json = __prepare_json(__e.lix_exitrv, errors=errors_json) - except api_errors.CertificateError as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_CONFIGURATION) - _error_json(__e, errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.PublisherError as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_BAD_REQUEST) - _error_json(__e, errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.ImageLockedError as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_LOCKED) - _error_json(__e, errors_json=errors_json) - ret_json = __prepare_json(EXIT_LOCKED, errors=errors_json) - except api_errors.TransportError as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_TRANSPORT) - - errors_json.append({"reason": _("Errors were encountered " + if non_wrap_print: + _api_inst, ret_json = func( + subcommand, + pargs_json, + opts_json, + pkg_image=pkg_image, + prog_delay=prog_delay, + prog_tracker=prog_tracker, + opts_mapping=opts_mapping, + api_inst=api_inst, + ) + else: + func() + except (MemoryError, EnvironmentError) as __e: + if isinstance(__e, EnvironmentError) and __e.errno != errno.ENOMEM: + raise + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_OUTOFMEMORY) + _error_json(misc.out_of_memory(), errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except SystemExit as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_UNKNOWN) + raise __e + except (PipeError, KeyboardInterrupt): + if _api_inst: + _api_inst.abort(result=RESULT_CANCELED) + # We don't want to display any messages here to prevent + # possible further broken pipe (EPIPE) errors. + ret_json = __prepare_json(EXIT_OOPS) + except api_errors.LinkedImageException as __e: + _error_json( + _("Linked image exception(s):\n{0}").format(str(__e)), + errors_json=errors_json, + ) + ret_json = __prepare_json(__e.lix_exitrv, errors=errors_json) + except api_errors.CertificateError as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_CONFIGURATION) + _error_json(__e, errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.PublisherError as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_BAD_REQUEST) + _error_json(__e, errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.ImageLockedError as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_LOCKED) + _error_json(__e, errors_json=errors_json) + ret_json = __prepare_json(EXIT_LOCKED, errors=errors_json) + except api_errors.TransportError as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_TRANSPORT) + + errors_json.append( + { + "reason": _( + "Errors were encountered " "while attempting to retrieve package or file data " - "for the requested operation.")}) - errors_json.append({"reason": _("Details follow:\n\n{0}" - ).format(__e)}) - _collect_proxy_config_errors(errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.InvalidCatalogFile as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_STORAGE) - errors_json.append({"reason": _("An error was encountered " + "for the requested operation." 
+ ) + } + ) + errors_json.append({"reason": _("Details follow:\n\n{0}").format(__e)}) + _collect_proxy_config_errors(errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.InvalidCatalogFile as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_STORAGE) + errors_json.append( + { + "reason": _( + "An error was encountered " "while attempting to read image state information to " "perform the requested operation. Details follow:\n\n{0}" - ).format(__e)}) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.InvalidDepotResponseException as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_TRANSPORT) - errors_json.append({"reason": _("\nUnable to contact a valid " + ).format(__e) + } + ) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.InvalidDepotResponseException as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_TRANSPORT) + errors_json.append( + { + "reason": _( + "\nUnable to contact a valid " "package repository. This may be due to a problem with " "the repository, network misconfiguration, or an " "incorrect pkg client configuration. Please verify the " "client's network configuration and repository's location." - "\nAdditional details:\n\n{0}").format(__e)}) - _collect_proxy_config_errors(errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.HistoryLoadException as __e: - # Since a history related error occurred, discard all - # information about the current operation(s) in progress. - if _api_inst: - _api_inst.clear_history() - _error_json(_("An error was encountered while attempting to " - "load history information\nabout past client operations."), - errors_json=errors_json) - _error_json(__e, errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.HistoryStoreException as __e: - # Since a history related error occurred, discard all - # information about the current operation(s) in progress. - if _api_inst: - _api_inst.clear_history() - _error_json({"reason": _("An error was encountered while " + "\nAdditional details:\n\n{0}" + ).format(__e) + } + ) + _collect_proxy_config_errors(errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.HistoryLoadException as __e: + # Since a history related error occurred, discard all + # information about the current operation(s) in progress. + if _api_inst: + _api_inst.clear_history() + _error_json( + _( + "An error was encountered while attempting to " + "load history information\nabout past client operations." + ), + errors_json=errors_json, + ) + _error_json(__e, errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.HistoryStoreException as __e: + # Since a history related error occurred, discard all + # information about the current operation(s) in progress. + if _api_inst: + _api_inst.clear_history() + _error_json( + { + "reason": _( + "An error was encountered while " "attempting to store information about the\ncurrent " "operation in client history. Details follow:\n\n{0}" - ).format(__e)}, errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.HistoryPurgeException as __e: - # Since a history related error occurred, discard all - # information about the current operation(s) in progress. 
- if _api_inst: - _api_inst.clear_history() - errors_json.append({"reason": _("An error was encountered " + ).format(__e) + }, + errors_json=errors_json, + ) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.HistoryPurgeException as __e: + # Since a history related error occurred, discard all + # information about the current operation(s) in progress. + if _api_inst: + _api_inst.clear_history() + errors_json.append( + { + "reason": _( + "An error was encountered " "while attempting to purge client history. " - "Details follow:\n\n{0}").format(__e)}) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.VersionException as __e: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_UNKNOWN) - _error_json(_("The pkg command appears out of sync with the " - "libraries provided\nby pkg:/package/pkg. The client " - "version is {client} while the library\nAPI version is " - "{api}.").format(client=__e.received_version, - api=__e.expected_version), errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.WrapSuccessfulIndexingException as __e: - ret_json = __prepare_json(EXIT_OK) - except api_errors.WrapIndexingException as __e: - def _wrapper(): - raise __e.wrapped - ret_json = __handle_errors_json(_wrapper, non_wrap_print=False) - - s = "" - if ret_json["status"] == 99: - s += _("\n{err}{stacktrace}").format( - err=__e, stacktrace=traceback_str) - - s += _("\n\nDespite the error while indexing, the operation " - "has completed successfuly.") - _error_json(s, errors_json=errors_json) - if "errors" in ret_json: - ret_json["errors"].extend(errors_json) - else: - ret_json["errors"] = errors_json - except api_errors.ReadOnlyFileSystemException as __e: - _error_json("The file system is read only.", - errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.UnexpectedLinkError as __e: - _error_json(str(__e), errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.UnrecognizedCatalogPart as __e: - _error_json(str(__e), errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except api_errors.InvalidConfigFile as __e: - _error_json(str(__e), errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) - except (api_errors.PkgUnicodeDecodeError, UnicodeEncodeError) as __e: - _error_json(str(__e), errors_json=errors_json) - ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + "Details follow:\n\n{0}" + ).format(__e) + } + ) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.VersionException as __e: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_UNKNOWN) + _error_json( + _( + "The pkg command appears out of sync with the " + "libraries provided\nby pkg:/package/pkg. The client " + "version is {client} while the library\nAPI version is " + "{api}." 
+ ).format(client=__e.received_version, api=__e.expected_version), + errors_json=errors_json, + ) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.WrapSuccessfulIndexingException as __e: + ret_json = __prepare_json(EXIT_OK) + except api_errors.WrapIndexingException as __e: + + def _wrapper(): + raise __e.wrapped + + ret_json = __handle_errors_json(_wrapper, non_wrap_print=False) + + s = "" + if ret_json["status"] == 99: + s += _("\n{err}{stacktrace}").format( + err=__e, stacktrace=traceback_str + ) + + s += _( + "\n\nDespite the error while indexing, the operation " + "has completed successfuly." + ) + _error_json(s, errors_json=errors_json) + if "errors" in ret_json: + ret_json["errors"].extend(errors_json) + else: + ret_json["errors"] = errors_json + except api_errors.ReadOnlyFileSystemException as __e: + _error_json("The file system is read only.", errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.UnexpectedLinkError as __e: + _error_json(str(__e), errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.UnrecognizedCatalogPart as __e: + _error_json(str(__e), errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except api_errors.InvalidConfigFile as __e: + _error_json(str(__e), errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except (api_errors.PkgUnicodeDecodeError, UnicodeEncodeError) as __e: + _error_json(str(__e), errors_json=errors_json) + ret_json = __prepare_json(EXIT_OOPS, errors=errors_json) + except: + if _api_inst: + _api_inst.abort(result=RESULT_FAILED_UNKNOWN) + if non_wrap_print: + traceback.print_exc() + _error_json( + traceback.format_exc() + "\n" + traceback_str, + errors_json=errors_json, + ) + ret_json = __prepare_json(99, errors=errors_json) + + if reset_api: + try: + if _api_inst: + _api_inst.reset() except: - if _api_inst: - _api_inst.abort(result=RESULT_FAILED_UNKNOWN) - if non_wrap_print: - traceback.print_exc() - _error_json(traceback.format_exc()+"\n"+traceback_str, - errors_json=errors_json) - ret_json = __prepare_json(99, errors=errors_json) - - if reset_api: - try: - if _api_inst: - _api_inst.reset() - except: - # If any errors occur during reset, we will discard - # this api_inst. - _api_inst = None - + # If any errors occur during reset, we will discard + # this api_inst. + _api_inst = None + + return _api_inst, ret_json + + +def _pkg_invoke( + subcommand=None, + pargs_json=None, + opts_json=None, + pkg_image=None, + prog_delay=PROG_DELAY, + prog_tracker=None, + opts_mapping=misc.EmptyDict, + api_inst=None, + reset_api=False, + return_api=False, +): + """pkg subcommands invocation. Output will be in JSON format. + subcommand: a string type pkg subcommand. + + pargs_json: a JSON blob containing a list of pargs. + + opts_json: a JSON blob containing a dictionary of pkg + subcommand options. + + pkg_image: a string type alternate image path. + + prog_delay: long progress event delay in sec. + + prog_tracker: progress tracker object. + + alternate_pargs_name: by default, 'pargs_json' will be the name in + input JSON schema. This option allows consumer to change the + pargs_json into an alternate name. 
+ """ + + _api_inst, ret_json = __handle_errors_json( + __pkg, + subcommand=subcommand, + pargs_json=pargs_json, + opts_json=opts_json, + pkg_image=pkg_image, + prog_delay=prog_delay, + prog_tracker=prog_tracker, + opts_mapping=opts_mapping, + api_inst=api_inst, + reset_api=reset_api, + ) + if return_api: return _api_inst, ret_json + else: + return ret_json -def _pkg_invoke(subcommand=None, pargs_json=None, opts_json=None, pkg_image=None, - prog_delay=PROG_DELAY, prog_tracker=None, opts_mapping=misc.EmptyDict, - api_inst=None, reset_api=False, return_api=False): - """pkg subcommands invocation. Output will be in JSON format. - subcommand: a string type pkg subcommand. - - pargs_json: a JSON blob containing a list of pargs. - - opts_json: a JSON blob containing a dictionary of pkg - subcommand options. - - pkg_image: a string type alternate image path. - - prog_delay: long progress event delay in sec. - - prog_tracker: progress tracker object. - - alternate_pargs_name: by default, 'pargs_json' will be the name in - input JSON schema. This option allows consumer to change the - pargs_json into an alternate name. - """ - - _api_inst, ret_json = __handle_errors_json(__pkg, - subcommand=subcommand, pargs_json=pargs_json, - opts_json=opts_json, pkg_image=pkg_image, - prog_delay=prog_delay, prog_tracker=prog_tracker, - opts_mapping=opts_mapping, api_inst=api_inst, reset_api=reset_api) - if return_api: - return _api_inst, ret_json - else: - return ret_json class ClientInterface(object): - """Class to provide a general interface to various clients.""" - - def __init__(self, pkg_image=None, prog_delay=PROG_DELAY, - prog_tracker=None, opts_mapping=misc.EmptyDict): - self.api_inst = None - self.pkg_image = pkg_image - self.prog_delay = prog_delay - self.prog_tracker = prog_tracker - self.opts_mapping = opts_mapping - - def __cmd_invoke(self, cmd, pargs_json=None, opts_json=None): - """Helper function for command invocation.""" - - # We will always reset api instance on exception. - _api_inst, ret_json = _pkg_invoke(cmd, pargs_json=pargs_json, - opts_json=opts_json, pkg_image=self.pkg_image, - prog_delay=self.prog_delay, prog_tracker=self.prog_tracker, - opts_mapping=self.opts_mapping, api_inst=self.api_inst, - reset_api=True, return_api=True) - self.api_inst = _api_inst - return ret_json + """Class to provide a general interface to various clients.""" + + def __init__( + self, + pkg_image=None, + prog_delay=PROG_DELAY, + prog_tracker=None, + opts_mapping=misc.EmptyDict, + ): + self.api_inst = None + self.pkg_image = pkg_image + self.prog_delay = prog_delay + self.prog_tracker = prog_tracker + self.opts_mapping = opts_mapping + + def __cmd_invoke(self, cmd, pargs_json=None, opts_json=None): + """Helper function for command invocation.""" + + # We will always reset api instance on exception. 
+ _api_inst, ret_json = _pkg_invoke( + cmd, + pargs_json=pargs_json, + opts_json=opts_json, + pkg_image=self.pkg_image, + prog_delay=self.prog_delay, + prog_tracker=self.prog_tracker, + opts_mapping=self.opts_mapping, + api_inst=self.api_inst, + reset_api=True, + return_api=True, + ) + self.api_inst = _api_inst + return ret_json - def list_inventory(self, pargs_json=None, opts_json=None): - """Invoke pkg list subcommand.""" + def list_inventory(self, pargs_json=None, opts_json=None): + """Invoke pkg list subcommand.""" - return self.__cmd_invoke("list", pargs_json=pargs_json, - opts_json=opts_json) + return self.__cmd_invoke( + "list", pargs_json=pargs_json, opts_json=opts_json + ) - def info(self, pargs_json=None, opts_json=None): - """Invoke pkg info subcommand.""" + def info(self, pargs_json=None, opts_json=None): + """Invoke pkg info subcommand.""" - return self.__cmd_invoke("info", pargs_json=pargs_json, - opts_json=opts_json) + return self.__cmd_invoke( + "info", pargs_json=pargs_json, opts_json=opts_json + ) - def exact_install(self, pargs_json=None, opts_json=None): - """Invoke pkg exact-install subcommand.""" + def exact_install(self, pargs_json=None, opts_json=None): + """Invoke pkg exact-install subcommand.""" - return self.__cmd_invoke("exact-install", - pargs_json=pargs_json, opts_json=opts_json) + return self.__cmd_invoke( + "exact-install", pargs_json=pargs_json, opts_json=opts_json + ) - def install(self, pargs_json=None, opts_json=None): - """Invoke pkg install subcommand.""" + def install(self, pargs_json=None, opts_json=None): + """Invoke pkg install subcommand.""" - return self.__cmd_invoke("install", pargs_json=pargs_json, - opts_json=opts_json) + return self.__cmd_invoke( + "install", pargs_json=pargs_json, opts_json=opts_json + ) - def update(self, pargs_json=None, opts_json=None): - """Invoke pkg update subcommand.""" + def update(self, pargs_json=None, opts_json=None): + """Invoke pkg update subcommand.""" - return self.__cmd_invoke("update", pargs_json=pargs_json, - opts_json=opts_json) + return self.__cmd_invoke( + "update", pargs_json=pargs_json, opts_json=opts_json + ) - def uninstall(self, pargs_json=None, opts_json=None): - """Invoke pkg uninstall subcommand.""" + def uninstall(self, pargs_json=None, opts_json=None): + """Invoke pkg uninstall subcommand.""" - return self.__cmd_invoke("uninstall", pargs_json=pargs_json, - opts_json=opts_json) + return self.__cmd_invoke( + "uninstall", pargs_json=pargs_json, opts_json=opts_json + ) - def publisher_set(self, pargs_json=None, opts_json=None): - """Invoke pkg set-publisher subcommand.""" + def publisher_set(self, pargs_json=None, opts_json=None): + """Invoke pkg set-publisher subcommand.""" - return self.__cmd_invoke("set-publisher", - pargs_json=pargs_json, opts_json=opts_json) + return self.__cmd_invoke( + "set-publisher", pargs_json=pargs_json, opts_json=opts_json + ) - def publisher_unset(self, pargs_json=None): - """Invoke pkg unset-publisher subcommand.""" + def publisher_unset(self, pargs_json=None): + """Invoke pkg unset-publisher subcommand.""" - return self.__cmd_invoke("unset-publisher", - pargs_json=pargs_json) + return self.__cmd_invoke("unset-publisher", pargs_json=pargs_json) - def publisher_list(self, pargs_json=None, opts_json=None): - """Invoke pkg publisher subcommand.""" + def publisher_list(self, pargs_json=None, opts_json=None): + """Invoke pkg publisher subcommand.""" - return self._cmd_invoke("publisher", pargs_json=pargs_json, - opts_json=opts_json) + return self._cmd_invoke( + 
"publisher", pargs_json=pargs_json, opts_json=opts_json + ) - def verify(self, pargs_json=None, opts_json=None): - """Invoke pkg verify subcommand.""" + def verify(self, pargs_json=None, opts_json=None): + """Invoke pkg verify subcommand.""" - return self._cmd_invoke("verify", pargs_json=pargs_json, - opts_json=opts_json) + return self._cmd_invoke( + "verify", pargs_json=pargs_json, opts_json=opts_json + ) - def fix(self, pargs_json=None, opts_json=None): - """Invoke pkg fix subcommand.""" + def fix(self, pargs_json=None, opts_json=None): + """Invoke pkg fix subcommand.""" - return self._cmd_invoke("fix", pargs_json=pargs_json, - opts_json=opts_json) + return self._cmd_invoke( + "fix", pargs_json=pargs_json, opts_json=opts_json + ) - def get_pkg_input_schema(self, subcommand): - """Get input schema for a specific subcommand.""" + def get_pkg_input_schema(self, subcommand): + """Get input schema for a specific subcommand.""" - return _get_pkg_input_schema(subcommand, - opts_mapping=self.opts_mapping) + return _get_pkg_input_schema(subcommand, opts_mapping=self.opts_mapping) - def get_pkg_output_schema(self, subcommand): - """Get output schema for a specific subcommand.""" + def get_pkg_output_schema(self, subcommand): + """Get output schema for a specific subcommand.""" - return _get_pkg_output_schema(subcommand) + return _get_pkg_output_schema(subcommand) cmds = { - "exact-install" : [_exact_install, __pkg_exact_install_output_schema], - "fix" : [_fix, __pkg_fix_output_schema], - "list" : [_list_inventory, __pkg_list_output_schema], - "install" : [_install, __pkg_install_output_schema], - "update" : [_update, __pkg_update_output_schema], - "uninstall" : [_uninstall, __pkg_uninstall_output_schema], - "set-publisher" : [_publisher_set, - __pkg_publisher_set_output_schema], - "unset-publisher" : [_publisher_unset, - __pkg_publisher_unset_output_schema], - "publisher" : [_publisher_list, __pkg_publisher_output_schema], - "info" : [_info, __pkg_info_output_schema], - "verify" : [_verify, __pkg_verify_output_schema] + "exact-install": [_exact_install, __pkg_exact_install_output_schema], + "fix": [_fix, __pkg_fix_output_schema], + "list": [_list_inventory, __pkg_list_output_schema], + "install": [_install, __pkg_install_output_schema], + "update": [_update, __pkg_update_output_schema], + "uninstall": [_uninstall, __pkg_uninstall_output_schema], + "set-publisher": [_publisher_set, __pkg_publisher_set_output_schema], + "unset-publisher": [_publisher_unset, __pkg_publisher_unset_output_schema], + "publisher": [_publisher_list, __pkg_publisher_output_schema], + "info": [_info, __pkg_info_output_schema], + "verify": [_verify, __pkg_verify_output_schema], } # Addendum table for option extensions. cmd_opts = {} # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/debugvalues.py b/src/modules/client/debugvalues.py index 8425ab13b..a4291bf5a 100644 --- a/src/modules/client/debugvalues.py +++ b/src/modules/client/debugvalues.py @@ -26,38 +26,38 @@ import six + class Singleton(type): - """Set __metaclass__ to Singleton to create a singleton. - See http://en.wikipedia.org/wiki/Singleton_pattern """ + """Set __metaclass__ to Singleton to create a singleton. 
+ See http://en.wikipedia.org/wiki/Singleton_pattern""" - def __init__(self, name, bases, dictionary): - super(Singleton, self).__init__(name, bases, dictionary) - self.instance = None + def __init__(self, name, bases, dictionary): + super(Singleton, self).__init__(name, bases, dictionary) + self.instance = None - def __call__(self, *args, **kw): - if self.instance is None: - self.instance = super(Singleton, self).__call__(*args, - **kw) + def __call__(self, *args, **kw): + if self.instance is None: + self.instance = super(Singleton, self).__call__(*args, **kw) - return self.instance + return self.instance class DebugValues(six.with_metaclass(Singleton, dict)): - """Singleton dict that returns None if unknown value - is referenced""" + """Singleton dict that returns None if unknown value + is referenced""" - def __getitem__(self, item): - """ returns None if not set """ - return self.get(item, None) + def __getitem__(self, item): + """returns None if not set""" + return self.get(item, None) - def get_value(self, key): - return self[key] + def get_value(self, key): + return self[key] - def set_value(self, key, value): - self[key] = value + def set_value(self, key, value): + self[key] = value -DebugValues=DebugValues() +DebugValues = DebugValues() # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/firmware.py b/src/modules/client/firmware.py index 5cd89d24c..6c0333772 100644 --- a/src/modules/client/firmware.py +++ b/src/modules/client/firmware.py @@ -36,98 +36,133 @@ class Firmware(object): - def __init__(self): - self.__firmware = {} # cache of things we've checked already - - def check_firmware(self, dep_action, firmware_name): - """Check firmware dependency. - returns ((true, false, none (internal error)), - error text)""" - - firmware_dir = "/usr/lib/fwenum" - # leverage smf test infrastructure - cmds_dir = DebugValues["smf_cmds_dir"] - if DebugValues["firmware-dependency-bypass"]: - return (True, None) - if cmds_dir: # we're testing; - firmware_dir = cmds_dir - - args = [os.path.join(firmware_dir, firmware_name[len("feature/firmware/"):])] - args.extend([ - "{0}={1}".format(k, quote_attr_value(v)) - for k,v in sorted(six.iteritems(dep_action.attrs)) - if k not in ["type", "root-image", "fmri"] - ]) - - key = str(args) - - # use a cache since each check may be expensive and each - # pkg version may have the same dependency. - # ignore non-solaris systems here - - if portable.osname != "sunos" and key not in self.firmware: - self.__firmware[key] = (True, None) - - if key not in self.__firmware: - try: - proc = subprocess.Popen(args, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - # output from proc is bytes - buf = [misc.force_str(l) for l in - proc.stdout.readlines()] - ret = proc.wait() - # if there was output, something went wrong. - # Since generic errors are often exit(1), - # map this to an internal error. - if ret == 1 and len(buf) > 0: - ret = 255 - if ret == 0: - ans = (True, None) - elif 0 < ret <= 239: - ans = (False, (_("There are {0} instances" - " of downrev firmware for the '{1}' " - " devices present on this system. " - "Update each to version {2} or better." - ).format(ret, args[1], - dep_action.attrs.get("minimum-version", - _("UNSPECIFIED"))))) - elif ret == 240: - ans = (False, (_("There are 240 or more " - "instances of downrev firmware for the" - "'{0}' devices present on this system. " - "Update each to version {1} or better." 
- ).format(args[1], - dep_action.attrs.get("minimum-version", - _("UNSPECIFIED"))))) - elif ret < 0: - ans = (None, - (_("Firmware dependency error: {0} " - " exited due to signal {1}").format( - " ".join(args), misc.signame(-ret)))) - else: - ans = (None, - (_("Firmware dependency error: General " - "internal error {0} running '{1}': '{2}'" - ).format(str(ret), " ".join(args), - "\n".join(buf)))) - - except OSError as e: - # we have no enumerator installed. This can - # occur if this driver is being installed - # for the first time or, more likely, we - # just added enumerators & a firmware dependency - # for the first time. For now, drive on and - # ignore this to permit the addition of such - # dependencies concurrently with their - # enumerarators. - # ans = (None, (_("Firmware dependency error:" - # " Cannot exec {0}: {1}").format(" ".join(args) - # , str(e)))) - ans = (True, 0) - - self.__firmware[key] = ans - - return self.__firmware[key] + def __init__(self): + self.__firmware = {} # cache of things we've checked already + + def check_firmware(self, dep_action, firmware_name): + """Check firmware dependency. + returns ((true, false, none (internal error)), + error text)""" + + firmware_dir = "/usr/lib/fwenum" + # leverage smf test infrastructure + cmds_dir = DebugValues["smf_cmds_dir"] + if DebugValues["firmware-dependency-bypass"]: + return (True, None) + if cmds_dir: # we're testing; + firmware_dir = cmds_dir + + args = [ + os.path.join( + firmware_dir, firmware_name[len("feature/firmware/") :] + ) + ] + args.extend( + [ + "{0}={1}".format(k, quote_attr_value(v)) + for k, v in sorted(six.iteritems(dep_action.attrs)) + if k not in ["type", "root-image", "fmri"] + ] + ) + + key = str(args) + + # use a cache since each check may be expensive and each + # pkg version may have the same dependency. + # ignore non-solaris systems here + + if portable.osname != "sunos" and key not in self.firmware: + self.__firmware[key] = (True, None) + + if key not in self.__firmware: + try: + proc = subprocess.Popen( + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT + ) + # output from proc is bytes + buf = [misc.force_str(l) for l in proc.stdout.readlines()] + ret = proc.wait() + # if there was output, something went wrong. + # Since generic errors are often exit(1), + # map this to an internal error. + if ret == 1 and len(buf) > 0: + ret = 255 + if ret == 0: + ans = (True, None) + elif 0 < ret <= 239: + ans = ( + False, + ( + _( + "There are {0} instances" + " of downrev firmware for the '{1}' " + " devices present on this system. " + "Update each to version {2} or better." + ).format( + ret, + args[1], + dep_action.attrs.get( + "minimum-version", _("UNSPECIFIED") + ), + ) + ), + ) + elif ret == 240: + ans = ( + False, + ( + _( + "There are 240 or more " + "instances of downrev firmware for the" + "'{0}' devices present on this system. " + "Update each to version {1} or better." + ).format( + args[1], + dep_action.attrs.get( + "minimum-version", _("UNSPECIFIED") + ), + ) + ), + ) + elif ret < 0: + ans = ( + None, + ( + _( + "Firmware dependency error: {0} " + " exited due to signal {1}" + ).format(" ".join(args), misc.signame(-ret)) + ), + ) + else: + ans = ( + None, + ( + _( + "Firmware dependency error: General " + "internal error {0} running '{1}': '{2}'" + ).format(str(ret), " ".join(args), "\n".join(buf)) + ), + ) + + except OSError as e: + # we have no enumerator installed. 
This can + # occur if this driver is being installed + # for the first time or, more likely, we + # just added enumerators & a firmware dependency + # for the first time. For now, drive on and + # ignore this to permit the addition of such + # dependencies concurrently with their + # enumerarators. + # ans = (None, (_("Firmware dependency error:" + # " Cannot exec {0}: {1}").format(" ".join(args) + # , str(e)))) + ans = (True, 0) + + self.__firmware[key] = ans + + return self.__firmware[key] + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/history.py b/src/modules/client/history.py index 00a6fd1df..be5cd95bd 100644 --- a/src/modules/client/history.py +++ b/src/modules/client/history.py @@ -105,41 +105,66 @@ MemoryError: RESULT_FAILED_OUTOFMEMORY, } -class _HistoryOperation(object): - """A _HistoryOperation object is a representation of data about an - operation that a pkg(7) client has performed. This class is private - and not intended for use by classes other than History. - - This class provides an abstraction layer between the stack of - operations that History manages should these values need to be - manipulated as they are set or retrieved. - """ - result_l10n = {} # Static variable for dictionary - - def __copy__(self): - h = _HistoryOperation() - for attr in ("name", "start_time", "end_time", "start_state", - "end_state", "username", "userid", "be", "be_exists", - "be_uuid", "current_be", "current_new_be", "new_be", - "new_be_exists", "new_be_uuid", "result", "release_notes", - "snapshot"): - setattr(h, attr, getattr(self, attr)) - h.errors = [copy.copy(e) for e in self.errors] - return h - - def __setattr__(self, name, value): - if name not in ("result", "errors", "be", "be_uuid", - "current_be", "current_new_be", "new_be", "new_be_exists", - "new_be_uuid", "snapshot"): - # Force all other attribute values to be a string - # to avoid issues with minidom. - value = str(value) - - return object.__setattr__(self, name, value) - - def __str__(self): - return """\ +class _HistoryOperation(object): + """A _HistoryOperation object is a representation of data about an + operation that a pkg(7) client has performed. This class is private + and not intended for use by classes other than History. + + This class provides an abstraction layer between the stack of + operations that History manages should these values need to be + manipulated as they are set or retrieved. + """ + + result_l10n = {} # Static variable for dictionary + + def __copy__(self): + h = _HistoryOperation() + for attr in ( + "name", + "start_time", + "end_time", + "start_state", + "end_state", + "username", + "userid", + "be", + "be_exists", + "be_uuid", + "current_be", + "current_new_be", + "new_be", + "new_be_exists", + "new_be_uuid", + "result", + "release_notes", + "snapshot", + ): + setattr(h, attr, getattr(self, attr)) + h.errors = [copy.copy(e) for e in self.errors] + return h + + def __setattr__(self, name, value): + if name not in ( + "result", + "errors", + "be", + "be_uuid", + "current_be", + "current_new_be", + "new_be", + "new_be_exists", + "new_be_uuid", + "snapshot", + ): + # Force all other attribute values to be a string + # to avoid issues with minidom. 
+ value = str(value) + + return object.__setattr__(self, name, value) + + def __str__(self): + return """\ Operation Name: {0} Operation Result: {1} Operation Start Time: {2} @@ -159,757 +184,774 @@ def __str__(self): Operation Release Notes: {15} Operation Errors: {16} -""".format(self.name, self.result, self.start_time, self.end_time, - self.start_state, self.end_state, self.username, self.userid, - self.be, self.current_be, self.be_uuid, self.new_be, self.current_new_be, - self.new_be_uuid, self.snapshot, self.release_notes, self.errors) - - # All "time" values should be in UTC, using ISO 8601 as the format. - # Name of the operation performed (e.g. install, update, etc.). - name = None - # When the operation started. - start_time = None - # When the operation ended. - end_time = None - # The starting state of the operation (e.g. image plan pre-evaluation). - start_state = None - # The ending state of the operation (e.g. image plan post-evaluation). - end_state = None - # Errors encountered during an operation. - errors = None - # username of the user that performed the operation. - username = None - # id of the user that performed the operation. - userid = None - # The boot environment on which the user performed the operation - be = None - # The current name of the boot environment. - current_be = None - # The uuid of the BE on which the user performed the operation - be_uuid = None - # The new boot environment that was created as a result of the operation - new_be = None - # The current name of the new boot environment. - current_new_be = None - # The uuid of the boot environment that was created as a result of the - # operation - new_be_uuid = None - # The name of the file containing the release notes, or None. - release_notes = None - - # The snapshot that was created while running this operation - # set to None if no snapshot was taken, or destroyed after successful - # completion - snapshot = None - - # The result of the operation (must be a list indicating (outcome, - # reason)). - result = None - - def __init__(self): - self.errors = [] - - @property - def result_text(self): - """Returns a tuple containing the translated text for the - operation result of the form (outcome, reason).""" - - if not _HistoryOperation.result_l10n: - # since we store english text in our XML files, we - # need a way for clients obtain a translated version - # of these messages. - _HistoryOperation.result_l10n = { - "Canceled": _("Canceled"), - "Failed": _("Failed"), - "Ignored": _("Ignored"), - "Nothing to do": _("Nothing to do"), - "Succeeded": _("Succeeded"), - "Bad Request": _("Bad Request"), - "Configuration": _("Configuration"), - "Constrained": _("Constrained"), - "Locked": _("Locked"), - "Search": _("Search"), - "Storage": _("Storage"), - "Transport": _("Transport"), - "Actuator": _("Actuator"), - "Out of Memory": _("Out of Memory"), - "Conflicting Actions": _("Conflicting Actions"), - "Unknown": _("Unknown"), - "None": _("None") - } - - if not self.start_time or not self.result: - return ("", "") - return (_HistoryOperation.result_l10n[self.result[0]], - _HistoryOperation.result_l10n[self.result[1]]) +""".format( + self.name, + self.result, + self.start_time, + self.end_time, + self.start_state, + self.end_state, + self.username, + self.userid, + self.be, + self.current_be, + self.be_uuid, + self.new_be, + self.current_new_be, + self.new_be_uuid, + self.snapshot, + self.release_notes, + self.errors, + ) + + # All "time" values should be in UTC, using ISO 8601 as the format. 
+ # Name of the operation performed (e.g. install, update, etc.). + name = None + # When the operation started. + start_time = None + # When the operation ended. + end_time = None + # The starting state of the operation (e.g. image plan pre-evaluation). + start_state = None + # The ending state of the operation (e.g. image plan post-evaluation). + end_state = None + # Errors encountered during an operation. + errors = None + # username of the user that performed the operation. + username = None + # id of the user that performed the operation. + userid = None + # The boot environment on which the user performed the operation + be = None + # The current name of the boot environment. + current_be = None + # The uuid of the BE on which the user performed the operation + be_uuid = None + # The new boot environment that was created as a result of the operation + new_be = None + # The current name of the new boot environment. + current_new_be = None + # The uuid of the boot environment that was created as a result of the + # operation + new_be_uuid = None + # The name of the file containing the release notes, or None. + release_notes = None + + # The snapshot that was created while running this operation + # set to None if no snapshot was taken, or destroyed after successful + # completion + snapshot = None + + # The result of the operation (must be a list indicating (outcome, + # reason)). + result = None + + def __init__(self): + self.errors = [] + + @property + def result_text(self): + """Returns a tuple containing the translated text for the + operation result of the form (outcome, reason).""" + + if not _HistoryOperation.result_l10n: + # since we store english text in our XML files, we + # need a way for clients obtain a translated version + # of these messages. + _HistoryOperation.result_l10n = { + "Canceled": _("Canceled"), + "Failed": _("Failed"), + "Ignored": _("Ignored"), + "Nothing to do": _("Nothing to do"), + "Succeeded": _("Succeeded"), + "Bad Request": _("Bad Request"), + "Configuration": _("Configuration"), + "Constrained": _("Constrained"), + "Locked": _("Locked"), + "Search": _("Search"), + "Storage": _("Storage"), + "Transport": _("Transport"), + "Actuator": _("Actuator"), + "Out of Memory": _("Out of Memory"), + "Conflicting Actions": _("Conflicting Actions"), + "Unknown": _("Unknown"), + "None": _("None"), + } + + if not self.start_time or not self.result: + return ("", "") + return ( + _HistoryOperation.result_l10n[self.result[0]], + _HistoryOperation.result_l10n[self.result[1]], + ) class History(object): - """A History object is a representation of data about a pkg(7) client - and about operations that the client is executing or has executed. It - uses the _HistoryOperation class to represent the data about an - operation. + """A History object is a representation of data about a pkg(7) client + and about operations that the client is executing or has executed. It + uses the _HistoryOperation class to represent the data about an + operation. + """ + + # The directory where the history directory can be found (or + # created if it doesn't exist). + root_dir = None + # The name of the client (e.g. pkg, etc.) + client_name = None + # The version of the client (e.g. 093ca22da67c). + client_version = None + # How the client was invoked (e.g. 'pkg install -n foo'). + client_args = None + + # A stack where operation data will actually be stored. 
+ __operations = [] + + # A private property used by preserve() and restore() to store snapshots + # of history and operation state information. + __snapshot = None + + # These attributes exist to fake access to the operations stack. + operation_name = None + operation_username = None + operation_userid = None + operation_current_be = None + operation_be = None + operation_be_uuid = None + operation_current_new_be = None + operation_new_be = None + operation_new_be_uuid = None + operation_start_time = None + operation_end_time = None + operation_start_state = None + operation_end_state = None + operation_snapshot = None + operation_errors = None + operation_result = None + operation_release_notes = None + + def __copy__(self): + h = History() + for attr in ("root_dir", "client_name", "client_version"): + setattr(h, attr, getattr(self, attr)) + object.__setattr__( + self, "client_args", [copy.copy(a) for a in self.client_args] + ) + # A deepcopy has to be performed here since this a list of dicts + # and not just History operation objects. + h.__operations = [copy.deepcopy(o) for o in self.__operations] + return h + + def __getattribute__(self, name): + if name == "client_args": + return object.__getattribute__(self, name)[:] + + if not name.startswith("operation_"): + return object.__getattribute__(self, name) + + ops = object.__getattribute__(self, "_History__operations") + if not ops: + return None + + return getattr(ops[-1]["operation"], name[len("operation_") :]) + + def __setattr__(self, name, value): + if name == "client_args": + raise AttributeError( + "'history' object attribute '{0}' " "is read-only.".format(name) + ) + + if not name.startswith("operation_"): + return object.__setattr__(self, name, value) + + ops = object.__getattribute__(self, "_History__operations") + if name == "operation_name": + if not ops: + ops = [] + object.__setattr__(self, "_History__operations", ops) + + ops.append({"pathname": None, "operation": _HistoryOperation()}) + elif not ops: + raise AttributeError( + "'history' object attribute '{0}' " + "cannot be set before 'operation_name'.".format(name) + ) + + op = ops[-1]["operation"] + setattr(op, name[len("operation_") :], value) + + # Access to the class attributes is done through object instead + # of just referencing self to avoid any of the special logic in + # place interfering with logic here. + if name == "operation_name": + # Before a new operation starts, clear exception state + # for the current one so that when this one ends, the + # last operation's exception won't be recorded to this + # one. If the error hasn't been recorded by now, it + # doesn't matter anyway, so should be safe to clear. + # sys.exc_clear() isn't supported in Python 3, and + # couldn't find a replacement. + try: + sys.exc_clear() + except: + pass + + # Mark the operation as having started and record + # other, relevant information. + op.start_time = misc.time_to_timestamp(None) + try: + op.username = portable.get_username() + except KeyError: + op.username = "unknown" + op.userid = portable.get_userid() + + ca = None + if sys.argv[0]: + ca = [sys.argv[0]] + else: + # Fallback for clients that provide no value. + ca = [self.client_name] + + ca.extend(sys.argv[1:]) + object.__setattr__(self, "client_args", ca) + object.__setattr__(self, "client_version", pkg.VERSION) + + elif name == "operation_result": + # Record when the operation ended. 
+ op.end_time = misc.time_to_timestamp(None) + + # Some operations shouldn't be saved -- they're merely + # included in the stack for completeness or to support + # client functionality. + if ( + op.name not in DISCARDED_OPERATIONS + and value != RESULT_NOTHING_TO_DO + ): + # Write current history and last operation to a + # file. + self.__save() + + # Discard it now that it is no longer needed. + del ops[-1] + + def __init__(self, root_dir=".", filename=None, uuid_be_dic=None): + """'root_dir' should be the path of the directory where the + history directory can be found (or created if it doesn't + exist). 'filename' should be the name of an XML file + containing serialized history information to load. + 'uuid_be_dic', if supplied, should be a dictionary of BE uuid + information, as produced by + pkg.client.bootenv.BootEnv.get_uuid_be_dic(), otherwise that + method is called each time a History object is created. + """ + # Since this is a read-only attribute normally, we have to + # bypass our setattr override by calling object. + object.__setattr__(self, "client_args", []) + + # Initialize client_name to what the client thinks it is. This + # will be overridden if we load history entries off disk. + self.client_name = pkg.client.global_settings.client_name + + self.root_dir = root_dir + if filename: + self.__load(filename, uuid_be_dic=uuid_be_dic) + + def __str__(self): + ops = self.__operations + return "\n".join([str(op["operation"]) for op in ops]) + + @property + def path(self): + """The directory where history files will be written to or + read from. """ + return os.path.join(self.root_dir, "history") - # The directory where the history directory can be found (or - # created if it doesn't exist). - root_dir = None - # The name of the client (e.g. pkg, etc.) - client_name = None - # The version of the client (e.g. 093ca22da67c). - client_version = None - # How the client was invoked (e.g. 'pkg install -n foo'). - client_args = None - - # A stack where operation data will actually be stored. - __operations = [] - - # A private property used by preserve() and restore() to store snapshots - # of history and operation state information. - __snapshot = None - - # These attributes exist to fake access to the operations stack. - operation_name = None - operation_username = None - operation_userid = None - operation_current_be = None - operation_be = None - operation_be_uuid = None - operation_current_new_be = None - operation_new_be = None - operation_new_be_uuid = None - operation_start_time = None - operation_end_time = None - operation_start_state = None - operation_end_state = None - operation_snapshot = None - operation_errors = None - operation_result = None - operation_release_notes = None - - def __copy__(self): - h = History() - for attr in ("root_dir", "client_name", "client_version"): - setattr(h, attr, getattr(self, attr)) - object.__setattr__(self, "client_args", - [copy.copy(a) for a in self.client_args]) - # A deepcopy has to be performed here since this a list of dicts - # and not just History operation objects. 
- h.__operations = [copy.deepcopy(o) for o in self.__operations] - return h - - def __getattribute__(self, name): - if name == "client_args": - return object.__getattribute__(self, name)[:] - - if not name.startswith("operation_"): - return object.__getattribute__(self, name) - - ops = object.__getattribute__(self, "_History__operations") - if not ops: - return None - - return getattr(ops[-1]["operation"], name[len("operation_"):]) - - def __setattr__(self, name, value): - if name == "client_args": - raise AttributeError("'history' object attribute '{0}' " - "is read-only.".format(name)) - - if not name.startswith("operation_"): - return object.__setattr__(self, name, value) - - ops = object.__getattribute__(self, "_History__operations") - if name == "operation_name": - if not ops: - ops = [] - object.__setattr__(self, - "_History__operations", ops) - - ops.append({ - "pathname": None, - "operation": _HistoryOperation() - }) - elif not ops: - raise AttributeError("'history' object attribute '{0}' " - "cannot be set before 'operation_name'.".format( - name)) - - op = ops[-1]["operation"] - setattr(op, name[len("operation_"):], value) - - # Access to the class attributes is done through object instead - # of just referencing self to avoid any of the special logic in - # place interfering with logic here. - if name == "operation_name": - # Before a new operation starts, clear exception state - # for the current one so that when this one ends, the - # last operation's exception won't be recorded to this - # one. If the error hasn't been recorded by now, it - # doesn't matter anyway, so should be safe to clear. - # sys.exc_clear() isn't supported in Python 3, and - # couldn't find a replacement. - try: - sys.exc_clear() - except: - pass - - # Mark the operation as having started and record - # other, relevant information. - op.start_time = misc.time_to_timestamp(None) - try: - op.username = portable.get_username() - except KeyError: - op.username = "unknown" - op.userid = portable.get_userid() - - ca = None - if sys.argv[0]: - ca = [sys.argv[0]] - else: - # Fallback for clients that provide no value. - ca = [self.client_name] - - ca.extend(sys.argv[1:]) - object.__setattr__(self, "client_args", ca) - object.__setattr__(self, "client_version", pkg.VERSION) - - elif name == "operation_result": - # Record when the operation ended. - op.end_time = misc.time_to_timestamp(None) - - # Some operations shouldn't be saved -- they're merely - # included in the stack for completeness or to support - # client functionality. - if op.name not in DISCARDED_OPERATIONS and \ - value != RESULT_NOTHING_TO_DO: - # Write current history and last operation to a - # file. - self.__save() - - # Discard it now that it is no longer needed. - del ops[-1] - - def __init__(self, root_dir=".", filename=None, uuid_be_dic=None): - """'root_dir' should be the path of the directory where the - history directory can be found (or created if it doesn't - exist). 'filename' should be the name of an XML file - containing serialized history information to load. - 'uuid_be_dic', if supplied, should be a dictionary of BE uuid - information, as produced by - pkg.client.bootenv.BootEnv.get_uuid_be_dic(), otherwise that - method is called each time a History object is created. - """ - # Since this is a read-only attribute normally, we have to - # bypass our setattr override by calling object. - object.__setattr__(self, "client_args", []) - - # Initialize client_name to what the client thinks it is. 
This - # will be overridden if we load history entries off disk. - self.client_name = pkg.client.global_settings.client_name - - self.root_dir = root_dir - if filename: - self.__load(filename, uuid_be_dic=uuid_be_dic) - - def __str__(self): - ops = self.__operations - return "\n".join([str(op["operation"]) for op in ops]) - - @property - def path(self): - """The directory where history files will be written to or - read from. - """ - return os.path.join(self.root_dir, "history") - - @property - def pathname(self): - """Returns the pathname that the history information was read - from or will attempted to be written to. Returns None if no - operation has started yet or if no operation has been loaded. - """ - if not self.operation_start_time: - return None - - ops = self.__operations - pathname = ops[-1]["pathname"] - if not pathname: - return os.path.join(self.path, - "{0}-01.xml".format(ops[-1]["operation"].start_time)) - return pathname - - @property - def notes(self): - """Generates the lines of release notes for this operation. - If no release notes are present, no output occurs.""" - - if not self.operation_release_notes: - return - try: - rpath = os.path.join(self.root_dir, - "notes", - self.operation_release_notes) - for a in open(rpath, "r"): - yield a.rstrip() - - except Exception as e: - raise apx.HistoryLoadException(e) - - def clear(self): - """Discards all information related to the current history - object. - """ - self.client_name = None - self.client_version = None - object.__setattr__(self, "client_args", []) - self.__operations = [] - - def __load_client_data(self, node): - """Internal function to load the client data from the given XML - 'node' object. - """ - self.client_name = node.getAttribute("name") - self.client_version = node.getAttribute("version") + @property + def pathname(self): + """Returns the pathname that the history information was read + from or will attempted to be written to. Returns None if no + operation has started yet or if no operation has been loaded. + """ + if not self.operation_start_time: + return None + + ops = self.__operations + pathname = ops[-1]["pathname"] + if not pathname: + return os.path.join( + self.path, "{0}-01.xml".format(ops[-1]["operation"].start_time) + ) + return pathname + + @property + def notes(self): + """Generates the lines of release notes for this operation. + If no release notes are present, no output occurs.""" + + if not self.operation_release_notes: + return + try: + rpath = os.path.join( + self.root_dir, "notes", self.operation_release_notes + ) + for a in open(rpath, "r"): + yield a.rstrip() + + except Exception as e: + raise apx.HistoryLoadException(e) + + def clear(self): + """Discards all information related to the current history + object. + """ + self.client_name = None + self.client_version = None + object.__setattr__(self, "client_args", []) + self.__operations = [] + + def __load_client_data(self, node): + """Internal function to load the client data from the given XML + 'node' object. + """ + self.client_name = node.getAttribute("name") + self.client_version = node.getAttribute("version") + try: + args = node.getElementsByTagName("args")[0] + except IndexError: + # There might not be any. + pass + else: + ca = object.__getattribute__(self, "client_args") + for cnode in args.getElementsByTagName("arg"): try: - args = node.getElementsByTagName("args")[0] - except IndexError: - # There might not be any. 
- pass - else: - ca = object.__getattribute__(self, "client_args") - for cnode in args.getElementsByTagName("arg"): - try: - ca.append(cnode.childNodes[0].wholeText) - except (AttributeError, IndexError): - # There may be no childNodes, or - # wholeText may not be defined. - pass - - @staticmethod - def __load_operation_data(node, uuid_be_dic): - """Internal function to load the operation data from the given - XML 'node' object and return a _HistoryOperation object. - """ - op = _HistoryOperation() - op.name = node.getAttribute("name") - op.start_time = node.getAttribute("start_time") - op.end_time = node.getAttribute("end_time") - op.username = node.getAttribute("username") - op.userid = node.getAttribute("userid") - op.result = node.getAttribute("result").split(", ") - - if len(op.result) == 1: - op.result.append("None") - - # older clients simply wrote "Nothing to do" instead of - # "Ignored, Nothing to do", so work around that - if op.result[0] == "Nothing to do": - op.result = RESULT_NOTHING_TO_DO - - if node.hasAttribute("be_uuid"): - op.be_uuid = node.getAttribute("be_uuid") - if node.hasAttribute("new_be_uuid"): - op.new_be_uuid = node.getAttribute("new_be_uuid") - if node.hasAttribute("be"): - op.be = node.getAttribute("be") - if op.be_uuid: - op.current_be = uuid_be_dic.get(op.be_uuid, - op.be) - if node.hasAttribute("new_be"): - op.new_be = node.getAttribute("new_be") - if op.new_be_uuid: - op.current_new_be = uuid_be_dic.get( - op.new_be_uuid, op.new_be) - if node.hasAttribute("release-notes"): - op.release_notes = node.getAttribute("release-notes") - - def get_node_values(parent_name, child_name=None): - try: - parent = node.getElementsByTagName(parent_name)[0] - if child_name: - cnodes = parent.getElementsByTagName( - child_name) - return [ - cnode.childNodes[0].wholeText - for cnode in cnodes - ] - return parent.childNodes[0].wholeText - except (AttributeError, IndexError): - # Assume no values are present for the node. - pass - if child_name: - return [] - return - - op.start_state = get_node_values("start_state") - op.end_state = get_node_values("end_state") - op.errors.extend(get_node_values("errors", child_name="error")) - - return op - - def __load(self, filename, uuid_be_dic=None): - """Loads the history from a file located in self.path/history/ - {filename}. The file should contain a serialized history - object in XML format. - """ - - # Ensure all previous information is discarded. - self.clear() + ca.append(cnode.childNodes[0].wholeText) + except (AttributeError, IndexError): + # There may be no childNodes, or + # wholeText may not be defined. + pass + + @staticmethod + def __load_operation_data(node, uuid_be_dic): + """Internal function to load the operation data from the given + XML 'node' object and return a _HistoryOperation object. 
+ """ + op = _HistoryOperation() + op.name = node.getAttribute("name") + op.start_time = node.getAttribute("start_time") + op.end_time = node.getAttribute("end_time") + op.username = node.getAttribute("username") + op.userid = node.getAttribute("userid") + op.result = node.getAttribute("result").split(", ") + + if len(op.result) == 1: + op.result.append("None") + + # older clients simply wrote "Nothing to do" instead of + # "Ignored, Nothing to do", so work around that + if op.result[0] == "Nothing to do": + op.result = RESULT_NOTHING_TO_DO + + if node.hasAttribute("be_uuid"): + op.be_uuid = node.getAttribute("be_uuid") + if node.hasAttribute("new_be_uuid"): + op.new_be_uuid = node.getAttribute("new_be_uuid") + if node.hasAttribute("be"): + op.be = node.getAttribute("be") + if op.be_uuid: + op.current_be = uuid_be_dic.get(op.be_uuid, op.be) + if node.hasAttribute("new_be"): + op.new_be = node.getAttribute("new_be") + if op.new_be_uuid: + op.current_new_be = uuid_be_dic.get(op.new_be_uuid, op.new_be) + if node.hasAttribute("release-notes"): + op.release_notes = node.getAttribute("release-notes") + + def get_node_values(parent_name, child_name=None): + try: + parent = node.getElementsByTagName(parent_name)[0] + if child_name: + cnodes = parent.getElementsByTagName(child_name) + return [cnode.childNodes[0].wholeText for cnode in cnodes] + return parent.childNodes[0].wholeText + except (AttributeError, IndexError): + # Assume no values are present for the node. + pass + if child_name: + return [] + return + + op.start_state = get_node_values("start_state") + op.end_state = get_node_values("end_state") + op.errors.extend(get_node_values("errors", child_name="error")) + + return op + + def __load(self, filename, uuid_be_dic=None): + """Loads the history from a file located in self.path/history/ + {filename}. The file should contain a serialized history + object in XML format. + """ - try: - if not uuid_be_dic: - uuid_be_dic = bootenv.BootEnv.get_uuid_be_dic() - except apx.ApiException as e: - uuid_be_dic = {} + # Ensure all previous information is discarded. + self.clear() + + try: + if not uuid_be_dic: + uuid_be_dic = bootenv.BootEnv.get_uuid_be_dic() + except apx.ApiException as e: + uuid_be_dic = {} + + try: + pathname = os.path.join(self.path, filename) + d = xmini.parse(pathname) + root = d.documentElement + for cnode in root.childNodes: + if cnode.nodeName == "client": + self.__load_client_data(cnode) + elif cnode.nodeName == "operation": + # Operations load differently due to + # the stack. + self.__operations.append( + { + "pathname": pathname, + "operation": self.__load_operation_data( + cnode, uuid_be_dic + ), + } + ) + except KeyboardInterrupt: + raise + except Exception as e: + raise apx.HistoryLoadException(e) + + def __serialize_client_data(self, d): + """Internal function used to serialize current client data + using the supplied 'd' (xml.dom.minidom) object. + """ - try: - pathname = os.path.join(self.path, filename) - d = xmini.parse(pathname) - root = d.documentElement - for cnode in root.childNodes: - if cnode.nodeName == "client": - self.__load_client_data(cnode) - elif cnode.nodeName == "operation": - # Operations load differently due to - # the stack. 
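Taken together, the __load* and __serialize* methods in this class imply a history file (written to <root_dir>/history/<start_time>-NN.xml) of roughly the following shape; every value in this sketch is hypothetical:

    import xml.dom.minidom as xmini

    sample = """<history>
      <client name="pkg" version="093ca22da67c">
        <args><arg><![CDATA[/usr/bin/pkg]]></arg><arg><![CDATA[install]]></arg></args>
      </client>
      <operation name="install" username="root" userid="0"
                 result="Succeeded" start_time="20240101T120000Z"
                 end_time="20240101T120001Z">
        <errors><error><![CDATA[example error text]]></error></errors>
      </operation>
    </history>"""

    doc = xmini.parseString(sample)
    op = doc.getElementsByTagName("operation")[0]
    print(op.getAttribute("result"))         # -> Succeeded
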
- self.__operations.append({ - "pathname": pathname, - "operation": - self.__load_operation_data( - cnode, uuid_be_dic) - }) - except KeyboardInterrupt: - raise - except Exception as e: - raise apx.HistoryLoadException(e) - - def __serialize_client_data(self, d): - """Internal function used to serialize current client data - using the supplied 'd' (xml.dom.minidom) object. - """ - - assert self.client_name is not None - assert self.client_version is not None - - root = d.documentElement - client = d.createElement("client") - client.setAttribute("name", self.client_name) - client.setAttribute("version", self.client_version) - root.appendChild(client) - - if self.client_args: - args = d.createElement("args") - client.appendChild(args) - for entry in self.client_args: - arg = d.createElement("arg") - args.appendChild(arg) - arg.appendChild( - d.createCDATASection(str(entry))) - - def __serialize_operation_data(self, d): - """Internal function used to serialize current operation data - using the supplied 'd' (xml.dom.minidom) object. - """ - - if self.operation_userid is None: - raise apx.HistoryStoreException("Unable to determine " - "the id of the user that performed the current " - "operation; unable to store history information.") - elif self.operation_username is None: - raise apx.HistoryStoreException("Unable to determine " - "the username of the user that performed the " - "current operation; unable to store history " - "information.") - - root = d.documentElement - op = d.createElement("operation") - op.setAttribute("name", self.operation_name) - # Must explictly convert values to a string due to minidom bug - # that causes a fatal whenever using types other than str. - op.setAttribute("username", str(self.operation_username)) - op.setAttribute("userid", str(self.operation_userid)) - op.setAttribute("result", ", ".join(self.operation_result)) - op.setAttribute("start_time", self.operation_start_time) - op.setAttribute("end_time", self.operation_end_time) - - if self.operation_be: - op.setAttribute("be", self.operation_be) - if self.operation_be_uuid: - op.setAttribute("be_uuid", self.operation_be_uuid) - if self.operation_new_be: - op.setAttribute("new_be", self.operation_new_be) - if self.operation_new_be_uuid: - op.setAttribute("new_be_uuid", - self.operation_new_be_uuid) - if self.operation_snapshot: - op.setAttribute("snapshot", self.operation_snapshot) - if self.operation_release_notes: - op.setAttribute("release-notes", self.operation_release_notes) - - root.appendChild(op) - - if self.operation_start_state: - state = d.createElement("start_state") - op.appendChild(state) - state.appendChild(d.createCDATASection( - str(self.operation_start_state))) - - if self.operation_end_state: - state = d.createElement("end_state") - op.appendChild(state) - state.appendChild(d.createCDATASection( - str(self.operation_end_state))) - - if self.operation_errors: - errors = d.createElement("errors") - op.appendChild(errors) - - for entry in self.operation_errors: - error = d.createElement("error") - errors.appendChild(error) - error.appendChild( - d.createCDATASection(str(entry))) - - def __save(self): - """Serializes the current history information and writes it to - a file in self.path/{operation_start_time}-{sequence}.xml. - """ - d = xmini.Document() - d.appendChild(d.createElement("history")) - self.__serialize_client_data(d) - self.__serialize_operation_data(d) - - if not os.path.exists(self.path): - try: - # Only the right-most directory should be - # created. 
Assume that if the parent structure - # does not exist, it shouldn't be created. - os.mkdir(self.path, misc.PKG_DIR_MODE) - except EnvironmentError as e: - if e.errno not in (errno.EROFS, errno.EACCES, - errno.ENOENT): - # Ignore read-only file system and - # access errors as it isn't critical - # to the image that this data is - # written. - raise apx.HistoryStoreException(e) - # Return, since without the directory, the rest - # of this will fail. - return - except KeyboardInterrupt: - raise - except Exception as e: - raise apx.HistoryStoreException(e) - - # Repeatedly attempt to write the history (only if it's because - # the file already exists). This is necessary due to multiple - # operations possibly occuring within the same second (but not - # microsecond). - pathname = self.pathname - for i in range(1, 100): - try: - f = os.fdopen(os.open(pathname, - os.O_CREAT|os.O_EXCL|os.O_WRONLY, - misc.PKG_FILE_MODE), "w") - d.writexml(f, - encoding=sys.getdefaultencoding()) - f.close() - return - except EnvironmentError as e: - if e.errno == errno.EEXIST: - name, ext = os.path.splitext( - os.path.basename(pathname)) - name = name.split("-", 1)[0] - # Pick the next name in our sequence - # and try again. - pathname = os.path.join(self.path, - "{0}-{1:>02d}{2}".format(name, - i + 1, ext)) - continue - elif e.errno not in (errno.EROFS, - errno.EACCES): - # Ignore read-only file system and - # access errors as it isn't critical - # to the image that this data is - # written. - raise apx.HistoryStoreException(e) - # For all other failures, return, and avoid any - # further attempts. - return - except KeyboardInterrupt: - raise - except Exception as e: - raise apx.HistoryStoreException(e) - - def purge(self, be_name=None, be_uuid=None): - """Removes all history information by deleting the directory - indicated by the value self.path and then creates a new history - entry to record that this purge occurred. - """ - self.operation_name = "purge-history" - self.operation_be = be_name - self.operation_be_uuid = be_uuid + assert self.client_name is not None + assert self.client_version is not None + + root = d.documentElement + client = d.createElement("client") + client.setAttribute("name", self.client_name) + client.setAttribute("version", self.client_version) + root.appendChild(client) + + if self.client_args: + args = d.createElement("args") + client.appendChild(args) + for entry in self.client_args: + arg = d.createElement("arg") + args.appendChild(arg) + arg.appendChild(d.createCDATASection(str(entry))) + + def __serialize_operation_data(self, d): + """Internal function used to serialize current operation data + using the supplied 'd' (xml.dom.minidom) object. + """ - try: - shutil.rmtree(self.path) - except KeyboardInterrupt: - raise - except EnvironmentError as e: - if e.errno in (errno.ENOENT, errno.ESRCH): - # History already purged; record as successful. - self.operation_result = RESULT_SUCCEEDED - return - raise apx.HistoryPurgeException(e) - except Exception as e: - raise apx.HistoryPurgeException(e) - else: - self.operation_result = RESULT_SUCCEEDED - - def abort(self, result): - """Intended to be used by the client during top-level error - handling to indicate that an unrecoverable error occurred - during the current operation(s). This allows History to end - all of the current operations properly and handle any possible - errors that might be encountered in History itself. - """ - try: - # Ensure that all operations in the current stack are - # ended properly. 
- while self.operation_name: - self.operation_result = result - except apx.HistoryStoreException: - # Ignore storage errors as it's likely that whatever - # caused the client to abort() also caused the storage - # of the history information to fail. - return - - def log_operation_start(self, name, be_name=None, be_uuid=None): - """Marks the start of an operation to be recorded in image - history.""" - self.operation_name = name - self.operation_be = be_name - self.operation_be_uuid = be_uuid - - def log_operation_end(self, error=None, result=None, release_notes=None): - """Marks the end of an operation to be recorded in image - history. - - 'result' should be a pkg.client.history constant value - representing the outcome of an operation. If not provided, - and 'error' is provided, the final result of the operation will - be based on the class of 'error' and 'error' will be recorded - for the current operation. If 'result' and 'error' is not - provided, success is assumed.""" + if self.operation_userid is None: + raise apx.HistoryStoreException( + "Unable to determine " + "the id of the user that performed the current " + "operation; unable to store history information." + ) + elif self.operation_username is None: + raise apx.HistoryStoreException( + "Unable to determine " + "the username of the user that performed the " + "current operation; unable to store history " + "information." + ) + + root = d.documentElement + op = d.createElement("operation") + op.setAttribute("name", self.operation_name) + # Must explictly convert values to a string due to minidom bug + # that causes a fatal whenever using types other than str. + op.setAttribute("username", str(self.operation_username)) + op.setAttribute("userid", str(self.operation_userid)) + op.setAttribute("result", ", ".join(self.operation_result)) + op.setAttribute("start_time", self.operation_start_time) + op.setAttribute("end_time", self.operation_end_time) + + if self.operation_be: + op.setAttribute("be", self.operation_be) + if self.operation_be_uuid: + op.setAttribute("be_uuid", self.operation_be_uuid) + if self.operation_new_be: + op.setAttribute("new_be", self.operation_new_be) + if self.operation_new_be_uuid: + op.setAttribute("new_be_uuid", self.operation_new_be_uuid) + if self.operation_snapshot: + op.setAttribute("snapshot", self.operation_snapshot) + if self.operation_release_notes: + op.setAttribute("release-notes", self.operation_release_notes) + + root.appendChild(op) + + if self.operation_start_state: + state = d.createElement("start_state") + op.appendChild(state) + state.appendChild( + d.createCDATASection(str(self.operation_start_state)) + ) + + if self.operation_end_state: + state = d.createElement("end_state") + op.appendChild(state) + state.appendChild( + d.createCDATASection(str(self.operation_end_state)) + ) + + if self.operation_errors: + errors = d.createElement("errors") + op.appendChild(errors) + + for entry in self.operation_errors: + error = d.createElement("error") + errors.appendChild(error) + error.appendChild(d.createCDATASection(str(entry))) + + def __save(self): + """Serializes the current history information and writes it to + a file in self.path/{operation_start_time}-{sequence}.xml. + """ + d = xmini.Document() + d.appendChild(d.createElement("history")) + self.__serialize_client_data(d) + self.__serialize_operation_data(d) + + if not os.path.exists(self.path): + try: + # Only the right-most directory should be + # created. Assume that if the parent structure + # does not exist, it shouldn't be created. 
+ os.mkdir(self.path, misc.PKG_DIR_MODE) + except EnvironmentError as e: + if e.errno not in (errno.EROFS, errno.EACCES, errno.ENOENT): + # Ignore read-only file system and + # access errors as it isn't critical + # to the image that this data is + # written. + raise apx.HistoryStoreException(e) + # Return, since without the directory, the rest + # of this will fail. + return + except KeyboardInterrupt: + raise + except Exception as e: + raise apx.HistoryStoreException(e) + + # Repeatedly attempt to write the history (only if it's because + # the file already exists). This is necessary due to multiple + # operations possibly occuring within the same second (but not + # microsecond). + pathname = self.pathname + for i in range(1, 100): + try: + f = os.fdopen( + os.open( + pathname, + os.O_CREAT | os.O_EXCL | os.O_WRONLY, + misc.PKG_FILE_MODE, + ), + "w", + ) + d.writexml(f, encoding=sys.getdefaultencoding()) + f.close() + return + except EnvironmentError as e: + if e.errno == errno.EEXIST: + name, ext = os.path.splitext(os.path.basename(pathname)) + name = name.split("-", 1)[0] + # Pick the next name in our sequence + # and try again. + pathname = os.path.join( + self.path, "{0}-{1:>02d}{2}".format(name, i + 1, ext) + ) + continue + elif e.errno not in (errno.EROFS, errno.EACCES): + # Ignore read-only file system and + # access errors as it isn't critical + # to the image that this data is + # written. + raise apx.HistoryStoreException(e) + # For all other failures, return, and avoid any + # further attempts. + return + except KeyboardInterrupt: + raise + except Exception as e: + raise apx.HistoryStoreException(e) + + def purge(self, be_name=None, be_uuid=None): + """Removes all history information by deleting the directory + indicated by the value self.path and then creates a new history + entry to record that this purge occurred. + """ + self.operation_name = "purge-history" + self.operation_be = be_name + self.operation_be_uuid = be_uuid + + try: + shutil.rmtree(self.path) + except KeyboardInterrupt: + raise + except EnvironmentError as e: + if e.errno in (errno.ENOENT, errno.ESRCH): + # History already purged; record as successful. + self.operation_result = RESULT_SUCCEEDED + return + raise apx.HistoryPurgeException(e) + except Exception as e: + raise apx.HistoryPurgeException(e) + else: + self.operation_result = RESULT_SUCCEEDED + + def abort(self, result): + """Intended to be used by the client during top-level error + handling to indicate that an unrecoverable error occurred + during the current operation(s). This allows History to end + all of the current operations properly and handle any possible + errors that might be encountered in History itself. + """ + try: + # Ensure that all operations in the current stack are + # ended properly. + while self.operation_name: + self.operation_result = result + except apx.HistoryStoreException: + # Ignore storage errors as it's likely that whatever + # caused the client to abort() also caused the storage + # of the history information to fail. + return + + def log_operation_start(self, name, be_name=None, be_uuid=None): + """Marks the start of an operation to be recorded in image + history.""" + self.operation_name = name + self.operation_be = be_name + self.operation_be_uuid = be_uuid + + def log_operation_end(self, error=None, result=None, release_notes=None): + """Marks the end of an operation to be recorded in image + history. + + 'result' should be a pkg.client.history constant value + representing the outcome of an operation. 
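A sketch of how a client is expected to drive these two entry points; do_refresh is a stand-in for whatever work the client actually performs:

    from pkg.client.history import History

    def do_refresh():
        pass                                  # hypothetical client work

    h = History(root_dir="/var/pkg")
    h.log_operation_start("refresh")
    try:
        do_refresh()
    except Exception as e:
        h.log_operation_end(error=e)          # result derived from error_results by class of 'e'
    else:
        h.log_operation_end()                 # no error and no result implies success
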
If not provided, + and 'error' is provided, the final result of the operation will + be based on the class of 'error' and 'error' will be recorded + for the current operation. If 'result' and 'error' is not + provided, success is assumed.""" + + if error: + self.log_operation_error(error) + self.operation_new_be = None + self.operation_new_be_uuid = None + + if error and not result: + try: + # Attempt get an exact error match first. + result = error_results[error.__class__] + except (AttributeError, KeyError): + # Failing an exact match, determine if this + # error is a subclass of an existing one. + for entry, val in six.iteritems(error_results): + if isinstance(error, entry): + result = val + break + if not result: + # If a result could still not be determined, + # assume unknown failure case. + result = RESULT_FAILED_UNKNOWN + elif not result: + # Assume success if no error and no result. + result = RESULT_SUCCEEDED + if release_notes: + self.operation_release_notes = release_notes + self.operation_result = result + + def log_operation_error(self, error): + """Adds an error to the list of errors to be recorded in image + history for the current operation.""" + + if self.operation_name: + out_stack = None + out_err = None + use_current_stack = True + if isinstance(error, Exception): + # Attempt to get the exception's stack trace + # from the stack. If the exception on the stack + # isn't the same object as the one being logged, + # then we have to use the current stack (which + # is somewhat less useful) instead of being able + # to find the code location of the original + # error. + type, val, tb = sys.exc_info() + if error == val: + output = traceback.format_exc() + use_current_stack = False + + if isinstance(error, six.string_types): + output = error + elif use_current_stack: + # Assume the current stack is more useful if + # the error doesn't inherit from Exception or + # we can't use the last exception's stack. + out_stack = "".join(traceback.format_stack()) if error: - self.log_operation_error(error) - self.operation_new_be = None - self.operation_new_be_uuid = None - - if error and not result: - try: - # Attempt get an exact error match first. - result = error_results[error.__class__] - except (AttributeError, KeyError): - # Failing an exact match, determine if this - # error is a subclass of an existing one. - for entry, val in six.iteritems(error_results): - if isinstance(error, entry): - result = val - break - if not result: - # If a result could still not be determined, - # assume unknown failure case. - result = RESULT_FAILED_UNKNOWN - elif not result: - # Assume success if no error and no result. - result = RESULT_SUCCEEDED - if release_notes: - self.operation_release_notes = release_notes - self.operation_result = result + # This may result in the text + # of the error itself being written + # twice, but that is necessary in case + # it is not contained within the + # output of format_exc(). + out_err = str(error) + if not out_err or out_err == "None": + out_err = error.__class__.__name__ + + output = "".join( + [item for item in [out_stack, out_err] if item] + ) + + self.operation_errors.append(output.strip()) + + def create_snapshot(self): + """Stores a snapshot of the current history and operation state + information in memory so that it can be restored in the event of + client failure (such as inability to store history information + or the failure of a boot environment operation). 
Each call to + this function will overwrite the previous snapshot.""" + + attrs = self.__snapshot = {} + for attr in ("root_dir", "client_name", "client_version"): + attrs[attr] = getattr(self, attr) + attrs["client_args"] = [copy.copy(a) for a in self.client_args] + # A deepcopy has to be performed here since this a list of dicts + # and not just History operation objects. + attrs["__operations"] = [copy.deepcopy(o) for o in self.__operations] + + def discard_snapshot(self): + """Discards the current history and operation state information + snapshot.""" + self.__snapshot = None + + def restore_snapshot(self): + """Restores the last snapshot taken of history and operation + state information completely discarding the existing history and + operation state information. If nothing exists to restore, this + this function will silently return.""" + + if not self.__snapshot: + return + + for name, val in six.iteritems(self.__snapshot): + if not name.startswith("__"): + object.__setattr__(self, name, val) + self.__operations = self.__snapshot["__operations"] - def log_operation_error(self, error): - """Adds an error to the list of errors to be recorded in image - history for the current operation.""" - - if self.operation_name: - out_stack = None - out_err = None - use_current_stack = True - if isinstance(error, Exception): - # Attempt to get the exception's stack trace - # from the stack. If the exception on the stack - # isn't the same object as the one being logged, - # then we have to use the current stack (which - # is somewhat less useful) instead of being able - # to find the code location of the original - # error. - type, val, tb = sys.exc_info() - if error == val: - output = traceback.format_exc() - use_current_stack = False - - if isinstance(error, six.string_types): - output = error - elif use_current_stack: - # Assume the current stack is more useful if - # the error doesn't inherit from Exception or - # we can't use the last exception's stack. - out_stack = "".join(traceback.format_stack()) - - if error: - # This may result in the text - # of the error itself being written - # twice, but that is necessary in case - # it is not contained within the - # output of format_exc(). - out_err = str(error) - if not out_err or out_err == "None": - out_err = \ - error.__class__.__name__ - - output = "".join([ - item for item in [out_stack, out_err] - if item - ]) - - self.operation_errors.append(output.strip()) - - def create_snapshot(self): - """Stores a snapshot of the current history and operation state - information in memory so that it can be restored in the event of - client failure (such as inability to store history information - or the failure of a boot environment operation). Each call to - this function will overwrite the previous snapshot.""" - - attrs = self.__snapshot = {} - for attr in ("root_dir", "client_name", "client_version"): - attrs[attr] = getattr(self, attr) - attrs["client_args"] = [copy.copy(a) for a in self.client_args] - # A deepcopy has to be performed here since this a list of dicts - # and not just History operation objects. - attrs["__operations"] = \ - [copy.deepcopy(o) for o in self.__operations] - - def discard_snapshot(self): - """Discards the current history and operation state information - snapshot.""" - self.__snapshot = None - - def restore_snapshot(self): - """Restores the last snapshot taken of history and operation - state information completely discarding the existing history and - operation state information. 
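A sketch of the save-and-rollback pattern these three methods are written for; risky_be_operation is a stand-in for something like a boot environment change:

    from pkg.client.history import History

    def risky_be_operation():
        pass                                  # hypothetical work that may fail

    h = History(root_dir="/var/pkg")
    h.create_snapshot()                       # remember the current history/operation state
    try:
        risky_be_operation()
    except Exception:
        h.restore_snapshot()                  # roll back to the snapshot on failure
        raise
    else:
        h.discard_snapshot()                  # success; the snapshot is no longer needed
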
If nothing exists to restore, this - this function will silently return.""" - - if not self.__snapshot: - return - - for name, val in six.iteritems(self.__snapshot): - if not name.startswith("__"): - object.__setattr__(self, name, val) - self.__operations = self.__snapshot["__operations"] # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/image.py b/src/modules/client/image.py index d77eb3ff7..48c064573 100644 --- a/src/modules/client/image.py +++ b/src/modules/client/image.py @@ -49,35 +49,36 @@ import pkg.actions import pkg.catalog -import pkg.client.api_errors as apx -import pkg.client.bootenv as bootenv -import pkg.client.history as history -import pkg.client.imageconfig as imageconfig -import pkg.client.imageplan as imageplan -import pkg.client.linkedimage as li -import pkg.client.pkgdefs as pkgdefs -import pkg.client.pkgplan as pkgplan -import pkg.client.plandesc as plandesc -import pkg.client.progress as progress -import pkg.client.publisher as publisher -import pkg.client.sigpolicy as sigpolicy -import pkg.client.transport.transport as transport -import pkg.config as cfg -import pkg.file_layout.layout as fl +import pkg.client.api_errors as apx +import pkg.client.bootenv as bootenv +import pkg.client.history as history +import pkg.client.imageconfig as imageconfig +import pkg.client.imageplan as imageplan +import pkg.client.linkedimage as li +import pkg.client.pkgdefs as pkgdefs +import pkg.client.pkgplan as pkgplan +import pkg.client.plandesc as plandesc +import pkg.client.progress as progress +import pkg.client.publisher as publisher +import pkg.client.sigpolicy as sigpolicy +import pkg.client.transport.transport as transport +import pkg.config as cfg +import pkg.file_layout.layout as fl import pkg.fmri -import pkg.json as json -import pkg.lockfile as lockfile -import pkg.manifest as manifest -import pkg.mediator as med -import pkg.misc as misc +import pkg.json as json +import pkg.lockfile as lockfile +import pkg.manifest as manifest +import pkg.mediator as med +import pkg.misc as misc import pkg.nrlock -import pkg.pkgsubprocess as subprocess -import pkg.portable as portable +import pkg.pkgsubprocess as subprocess +import pkg.portable as portable import pkg.server.catalog -import pkg.smf as smf +import pkg.smf as smf import pkg.version from pkg.client import global_settings + logger = global_settings.logger from pkg.client.debugvalues import DebugValues @@ -90,5031 +91,5264 @@ IMG_PUB_DIR = "publisher" -class Image(object): - """An Image object is a directory tree containing the laid-down contents - of a self-consistent graph of Packages. - - An Image has a root path. - - An Image of type IMG_ENTIRE does not have a parent Image. Other Image - types must have a parent Image. The external state of the parent Image - must be accessible from the Image's context, or duplicated within the - Image (IMG_PARTIAL for zones, for instance). - - The parent of a user Image can be a partial Image. The parent of a - partial Image must be an entire Image. - - An Image of type IMG_USER stores its external state at self.root + - ".org.opensolaris,pkg". - - An Image of type IMG_ENTIRE or IMG_PARTIAL stores its external state at - self.root + "/var/pkg". - An Image needs to be able to have a different repository set than the - system's root Image. - - For image format details, see section 5.3 of doc/on-disk-format.txt - in the pkg(7) gate. 
- """ - - # Class constants - CURRENT_VERSION = 4 - IMG_CATALOG_KNOWN = "known" - IMG_CATALOG_INSTALLED = "installed" - - __STATE_UPDATING_FILE = "state_updating" - - def __init__(self, root, user_provided_dir=False, progtrack=None, - should_exist=True, imgtype=None, force=False, - augment_ta_from_parent_image=True, allow_ondisk_upgrade=None, - props=misc.EmptyDict, cmdpath=None): - - if should_exist: - assert(imgtype is None) - assert(not force) - else: - assert(imgtype is not None) - - # Alternate package sources. - self.__alt_pkg_pub_map = None - self.__alt_pubs = None - self.__alt_known_cat = None - self.__alt_pkg_sources_loaded = False - - # Determine identity of client executable if appropriate. - if cmdpath == None: - cmdpath = misc.api_cmdpath() - self.cmdpath = cmdpath - - if self.cmdpath != None: - self.__cmddir = os.path.dirname(cmdpath) - - # prevent brokeness in the test suite - if self.cmdpath and \ - "PKG_NO_RUNPY_CMDPATH" in os.environ and \ - self.cmdpath.endswith(os.sep + "run.py"): - raise RuntimeError(""" +class Image(object): + """An Image object is a directory tree containing the laid-down contents + of a self-consistent graph of Packages. + + An Image has a root path. + + An Image of type IMG_ENTIRE does not have a parent Image. Other Image + types must have a parent Image. The external state of the parent Image + must be accessible from the Image's context, or duplicated within the + Image (IMG_PARTIAL for zones, for instance). + + The parent of a user Image can be a partial Image. The parent of a + partial Image must be an entire Image. + + An Image of type IMG_USER stores its external state at self.root + + ".org.opensolaris,pkg". + + An Image of type IMG_ENTIRE or IMG_PARTIAL stores its external state at + self.root + "/var/pkg". + + An Image needs to be able to have a different repository set than the + system's root Image. + + For image format details, see section 5.3 of doc/on-disk-format.txt + in the pkg(7) gate. + """ + + # Class constants + CURRENT_VERSION = 4 + IMG_CATALOG_KNOWN = "known" + IMG_CATALOG_INSTALLED = "installed" + + __STATE_UPDATING_FILE = "state_updating" + + def __init__( + self, + root, + user_provided_dir=False, + progtrack=None, + should_exist=True, + imgtype=None, + force=False, + augment_ta_from_parent_image=True, + allow_ondisk_upgrade=None, + props=misc.EmptyDict, + cmdpath=None, + ): + if should_exist: + assert imgtype is None + assert not force + else: + assert imgtype is not None + + # Alternate package sources. + self.__alt_pkg_pub_map = None + self.__alt_pubs = None + self.__alt_known_cat = None + self.__alt_pkg_sources_loaded = False + + # Determine identity of client executable if appropriate. + if cmdpath == None: + cmdpath = misc.api_cmdpath() + self.cmdpath = cmdpath + + if self.cmdpath != None: + self.__cmddir = os.path.dirname(cmdpath) + + # prevent brokeness in the test suite + if ( + self.cmdpath + and "PKG_NO_RUNPY_CMDPATH" in os.environ + and self.cmdpath.endswith(os.sep + "run.py") + ): + raise RuntimeError( + """ An Image object was allocated from within ipkg test suite and cmdpath was not explicitly overridden. Please make sure to explicitly set cmdpath when allocating an Image object, or override cmdpath when allocating an Image object by setting PKG_CMDPATH -in the environment or by setting simulate_cmdpath in DebugValues.""") - - self.linked = None - - # Indicates whether automatic image format upgrades of the - # on-disk format are allowed. 
- self.allow_ondisk_upgrade = allow_ondisk_upgrade - self.__upgraded = False - - # Must happen after upgraded assignment. - self.__init_catalogs() - - self.__imgdir = None - self.__root = root - - self.blocking_locks = False - self.cfg = None - self.history = history.History() - self.imageplan = None - self.img_prefix = None - self.index_dir = None - self.plandir = None - self.version = -1 - - # Can have multiple read cache dirs... - self.__read_cache_dirs = [] - - # ...but only one global write cache dir and incoming write dir. - self.__write_cache_dir = None - self.__user_cache_dir = None - self._incoming_cache_dir = None - - # Set if write_cache is actually a tree like /var/pkg/publisher - # instead of a flat cache. - self.__write_cache_root = None - - self.__lock = pkg.nrlock.NRLock() - self.__lockfile = None - self.__sig_policy = None - self.__trust_anchors = None - self.__bad_trust_anchors = [] - - # cache for presence of boot-archive - self.__boot_archive = None - - # When users and groups are added before their database files - # have been installed, the actions store them temporarily in the - # image, in these members. - self._users = set() - self._groups = set() - self._usersbyname = {} - self._groupsbyname = {} - - # Set of pkg stems being avoided per configuration. - self.__avoid_set = None - self.__avoid_set_altered = False - - # Set of pkg stems being avoided by solver due to dependency - # constraints (not administrative action). - self.__implicit_avoid_set = None - - # set of pkg stems subject to group - # dependency but removed because obsolete - self.__group_obsolete = None - - # The action dictionary that's returned by __load_actdict. - self.__actdict = None - self.__actdict_timestamp = None - - self.__property_overrides = { "property": props } - - # Transport operations for this image - self.transport = transport.Transport( - transport.ImageTransportCfg(self)) - - self.linked = li.LinkedImage(self) - - if should_exist: - self.find_root(self.root, user_provided_dir, - progtrack) - else: - if not force and self.image_type(self.root) != None: - raise apx.ImageAlreadyExists(self.root) - if not force and os.path.exists(self.root): - # ignore .zfs snapdir if it's present - snapdir = os.path.join(self.root, ".zfs") - listdir = set(os.listdir(self.root)) - if os.path.isdir(snapdir): - listdir -= set([".zfs"]) - if len(listdir) > 0: - raise apx.CreatingImageInNonEmptyDir( - self.root) - self.__set_dirs(root=self.root, imgtype=imgtype, - progtrack=progtrack, purge=True) - - # right now we don't explicitly set dir/file modes everywhere; - # set umask to proper value to prevent problems w/ overly - # locked down umask. - os.umask(0o022) - - self.augment_ta_from_parent_image = augment_ta_from_parent_image - - def __catalog_loaded(self, name): - """Returns a boolean value indicating whether the named catalog - has already been loaded. This is intended to be used as an - optimization function to determine which catalog to request.""" - - return name in self.__catalogs - - def __init_catalogs(self): - """Initializes default catalog state. Actual data is provided - on demand via get_catalog()""" - - if self.__upgraded and self.version < 3: - # Ignore request; transformed catalog data only exists - # in memory and can't be reloaded from disk. - return - - # This is used to cache image catalogs. 
- self.__catalogs = {} - self.__alt_pkg_sources_loaded = False - - @staticmethod - def alloc(*args, **kwargs): - return Image(*args, **kwargs) - - @property - def imgdir(self): - """The absolute path of the image's metadata.""" - return self.__imgdir - - @property - def locked(self): - """A boolean value indicating whether the image is currently - locked.""" - - return self.__lock and self.__lock.locked - - @property - def root(self): - """The absolute path of the image's location.""" - return self.__root - - @property - def signature_policy(self): - """The current signature policy for this image.""" - - if self.__sig_policy is not None: - return self.__sig_policy - txt = self.cfg.get_policy_str(imageconfig.SIGNATURE_POLICY) - names = self.cfg.get_property("property", - "signature-required-names") - self.__sig_policy = sigpolicy.Policy.policy_factory(txt, names) - return self.__sig_policy - - @property - def trust_anchors(self): - """A dictionary mapping subject hashes for certificates this - image trusts to those certs. The image trusts the trust anchors - in its trust_anchor_dir and those in the image from which the - client was run.""" - - if self.__trust_anchors is not None: - return self.__trust_anchors - - user_set_ta_loc = True - rel_dir = self.get_property("trust-anchor-directory") - if rel_dir[0] == "/": - rel_dir = rel_dir[1:] - trust_anchor_loc = os.path.join(self.root, rel_dir) - loc_is_dir = os.path.isdir(trust_anchor_loc) - pkg_trust_anchors = {} - if self.__cmddir and self.augment_ta_from_parent_image: - pkg_trust_anchors = Image(self.__cmddir, - augment_ta_from_parent_image=False, - cmdpath=self.cmdpath).trust_anchors - if not loc_is_dir and os.path.exists(trust_anchor_loc): - raise apx.InvalidPropertyValue(_("The trust " - "anchors for the image were expected to be found " - "in {0}, but that is not a directory. Please set " - "the image property 'trust-anchor-directory' to " - "the correct path.").format(trust_anchor_loc)) - self.__trust_anchors = {} - if loc_is_dir: - for fn in os.listdir(trust_anchor_loc): - pth = os.path.join(trust_anchor_loc, fn) - if os.path.islink(pth): - continue - try: - with open(pth, "rb") as f: - raw = f.read() - trusted_ca = \ - x509.load_pem_x509_certificate( - raw, default_backend()) - except (ValueError, IOError) as e: - self.__bad_trust_anchors.append( - (pth, str(e))) - else: - # We store certificates internally by - # the SHA-1 hash of its subject. - s = hashlib.sha1(misc.force_bytes( - trusted_ca.subject)).hexdigest() - self.__trust_anchors.setdefault(s, []) - self.__trust_anchors[s].append( - trusted_ca) - for s in pkg_trust_anchors: - if s not in self.__trust_anchors: - self.__trust_anchors[s] = pkg_trust_anchors[s] - return self.__trust_anchors - - @property - def bad_trust_anchors(self): - """A list of strings decribing errors encountered while parsing - trust anchors.""" - - return [_("{path} is expected to be a certificate but could " - "not be parsed. The error encountered " - "was:\n\t{err}").format(path=p, err=e) - for p, e in self.__bad_trust_anchors - ] - - @property - def write_cache_path(self): - """The path to the filesystem that holds the write cache--used - to compute whether sufficent space is available for - downloads.""" - - return self.__user_cache_dir or \ - os.path.join(self.imgdir, IMG_PUB_DIR) - - @contextmanager - def locked_op(self, op, allow_unprivileged=False, new_history_op=True): - """Helper method for executing an image-modifying operation - that needs locking. 
It automatically handles calling - log_operation_start and log_operation_end by default. Locking - behaviour is controlled by the blocking_locks image property. - - 'allow_unprivileged' is an optional boolean value indicating - that permissions-related exceptions should be ignored when - attempting to obtain the lock as the related operation will - still work correctly even though the image cannot (presumably) - be modified. - - 'new_history_op' indicates whether we should handle history - operations. - """ - - error = None - self.lock(allow_unprivileged=allow_unprivileged) +in the environment or by setting simulate_cmdpath in DebugValues.""" + ) + + self.linked = None + + # Indicates whether automatic image format upgrades of the + # on-disk format are allowed. + self.allow_ondisk_upgrade = allow_ondisk_upgrade + self.__upgraded = False + + # Must happen after upgraded assignment. + self.__init_catalogs() + + self.__imgdir = None + self.__root = root + + self.blocking_locks = False + self.cfg = None + self.history = history.History() + self.imageplan = None + self.img_prefix = None + self.index_dir = None + self.plandir = None + self.version = -1 + + # Can have multiple read cache dirs... + self.__read_cache_dirs = [] + + # ...but only one global write cache dir and incoming write dir. + self.__write_cache_dir = None + self.__user_cache_dir = None + self._incoming_cache_dir = None + + # Set if write_cache is actually a tree like /var/pkg/publisher + # instead of a flat cache. + self.__write_cache_root = None + + self.__lock = pkg.nrlock.NRLock() + self.__lockfile = None + self.__sig_policy = None + self.__trust_anchors = None + self.__bad_trust_anchors = [] + + # cache for presence of boot-archive + self.__boot_archive = None + + # When users and groups are added before their database files + # have been installed, the actions store them temporarily in the + # image, in these members. + self._users = set() + self._groups = set() + self._usersbyname = {} + self._groupsbyname = {} + + # Set of pkg stems being avoided per configuration. + self.__avoid_set = None + self.__avoid_set_altered = False + + # Set of pkg stems being avoided by solver due to dependency + # constraints (not administrative action). + self.__implicit_avoid_set = None + + # set of pkg stems subject to group + # dependency but removed because obsolete + self.__group_obsolete = None + + # The action dictionary that's returned by __load_actdict. + self.__actdict = None + self.__actdict_timestamp = None + + self.__property_overrides = {"property": props} + + # Transport operations for this image + self.transport = transport.Transport(transport.ImageTransportCfg(self)) + + self.linked = li.LinkedImage(self) + + if should_exist: + self.find_root(self.root, user_provided_dir, progtrack) + else: + if not force and self.image_type(self.root) != None: + raise apx.ImageAlreadyExists(self.root) + if not force and os.path.exists(self.root): + # ignore .zfs snapdir if it's present + snapdir = os.path.join(self.root, ".zfs") + listdir = set(os.listdir(self.root)) + if os.path.isdir(snapdir): + listdir -= set([".zfs"]) + if len(listdir) > 0: + raise apx.CreatingImageInNonEmptyDir(self.root) + self.__set_dirs( + root=self.root, imgtype=imgtype, progtrack=progtrack, purge=True + ) + + # right now we don't explicitly set dir/file modes everywhere; + # set umask to proper value to prevent problems w/ overly + # locked down umask. 
+ os.umask(0o022) + + self.augment_ta_from_parent_image = augment_ta_from_parent_image + + def __catalog_loaded(self, name): + """Returns a boolean value indicating whether the named catalog + has already been loaded. This is intended to be used as an + optimization function to determine which catalog to request.""" + + return name in self.__catalogs + + def __init_catalogs(self): + """Initializes default catalog state. Actual data is provided + on demand via get_catalog()""" + + if self.__upgraded and self.version < 3: + # Ignore request; transformed catalog data only exists + # in memory and can't be reloaded from disk. + return + + # This is used to cache image catalogs. + self.__catalogs = {} + self.__alt_pkg_sources_loaded = False + + @staticmethod + def alloc(*args, **kwargs): + return Image(*args, **kwargs) + + @property + def imgdir(self): + """The absolute path of the image's metadata.""" + return self.__imgdir + + @property + def locked(self): + """A boolean value indicating whether the image is currently + locked.""" + + return self.__lock and self.__lock.locked + + @property + def root(self): + """The absolute path of the image's location.""" + return self.__root + + @property + def signature_policy(self): + """The current signature policy for this image.""" + + if self.__sig_policy is not None: + return self.__sig_policy + txt = self.cfg.get_policy_str(imageconfig.SIGNATURE_POLICY) + names = self.cfg.get_property("property", "signature-required-names") + self.__sig_policy = sigpolicy.Policy.policy_factory(txt, names) + return self.__sig_policy + + @property + def trust_anchors(self): + """A dictionary mapping subject hashes for certificates this + image trusts to those certs. The image trusts the trust anchors + in its trust_anchor_dir and those in the image from which the + client was run.""" + + if self.__trust_anchors is not None: + return self.__trust_anchors + + user_set_ta_loc = True + rel_dir = self.get_property("trust-anchor-directory") + if rel_dir[0] == "/": + rel_dir = rel_dir[1:] + trust_anchor_loc = os.path.join(self.root, rel_dir) + loc_is_dir = os.path.isdir(trust_anchor_loc) + pkg_trust_anchors = {} + if self.__cmddir and self.augment_ta_from_parent_image: + pkg_trust_anchors = Image( + self.__cmddir, + augment_ta_from_parent_image=False, + cmdpath=self.cmdpath, + ).trust_anchors + if not loc_is_dir and os.path.exists(trust_anchor_loc): + raise apx.InvalidPropertyValue( + _( + "The trust " + "anchors for the image were expected to be found " + "in {0}, but that is not a directory. Please set " + "the image property 'trust-anchor-directory' to " + "the correct path." + ).format(trust_anchor_loc) + ) + self.__trust_anchors = {} + if loc_is_dir: + for fn in os.listdir(trust_anchor_loc): + pth = os.path.join(trust_anchor_loc, fn) + if os.path.islink(pth): + continue try: - be_name, be_uuid = \ - bootenv.BootEnv.get_be_name(self.root) - if new_history_op: - self.history.log_operation_start(op, - be_name=be_name, be_uuid=be_uuid) - yield - except apx.ImageLockedError as e: - # Don't unlock the image if the call failed to - # get the lock. 
- error = e - raise - except Exception as e: - error = e - self.unlock() - raise + with open(pth, "rb") as f: + raw = f.read() + trusted_ca = x509.load_pem_x509_certificate( + raw, default_backend() + ) + except (ValueError, IOError) as e: + self.__bad_trust_anchors.append((pth, str(e))) else: - self.unlock() - finally: - if new_history_op: - self.history.log_operation_end(error=error) - - def lock(self, allow_unprivileged=False): - """Locks the image in preparation for an image-modifying - operation. Raises an ImageLockedError exception on failure. - Locking behaviour is controlled by the blocking_locks image - property. - - 'allow_unprivileged' is an optional boolean value indicating - that permissions-related exceptions should be ignored when - attempting to obtain the lock as the related operation will - still work correctly even though the image cannot (presumably) - be modified. - """ - - blocking = self.blocking_locks - - # First, attempt to obtain a thread lock. - if not self.__lock.acquire(blocking=blocking): - raise apx.ImageLockedError() - - try: - # Attempt to obtain a file lock. - self.__lockfile.lock(blocking=blocking) - except EnvironmentError as e: - exc = None - if e.errno == errno.ENOENT: - return - if e.errno == errno.EACCES: - exc = apx.UnprivilegedUserError(e.filename) - elif e.errno == errno.EROFS: - exc = apx.ReadOnlyFileSystemException( - e.filename) - elif e.errno in (errno.ENOSPC, errno.EDQUOT): - # Failed due to space constraint - raise e - else: - self.__lock.release() - raise - - if exc and not allow_unprivileged: - self.__lock.release() - raise exc - except: - # If process lock fails, ensure thread lock is released. - self.__lock.release() - raise - - def unlock(self): - """Unlocks the image.""" - - try: - if self.__lockfile: - self.__lockfile.unlock() - finally: - self.__lock.release() - - def image_type(self, d): - """Returns the type of image at directory: d; or None""" - rv = None - - def is_image(sub_d, prefix): - # First check for new image configuration file. - if os.path.isfile(os.path.join(sub_d, prefix, - "pkg5.image")): - # Regardless of directory structure, assume - # this is an image for now. - return True - - if not os.path.isfile(os.path.join(sub_d, prefix, - "cfg_cache")): - # For older formats, if configuration is - # missing, this can't be an image. - return False - - # Configuration exists, but for older formats, - # all of these directories have to exist. - for n in ("state", "pkg"): - if not os.path.isdir(os.path.join(sub_d, prefix, - n)): - return False - - return True - - if os.path.isdir(os.path.join(d, img_user_prefix)) and \ - is_image(d, img_user_prefix): - rv = IMG_USER - elif os.path.isdir(os.path.join(d, img_root_prefix)) and \ - is_image(d, img_root_prefix): - rv = IMG_ENTIRE - return rv - - def find_root(self, d, exact_match=False, progtrack=None): - # Ascend from the given directory d to find first - # encountered image. If exact_match is true, if the - # image found doesn't match startd, raise an - # ImageNotFoundException. - - startd = d - # eliminate problem if relative path such as "." is passed in - d = os.path.realpath(d) - - while True: - imgtype = self.image_type(d) - if imgtype in (IMG_USER, IMG_ENTIRE): - if exact_match and \ - os.path.realpath(startd) != \ - os.path.realpath(d): - raise apx.ImageNotFoundException( - exact_match, startd, d) - self.__set_dirs(imgtype=imgtype, root=d, - startd=startd, progtrack=progtrack) - return - - # XXX follow symlinks or not? 
- oldpath = d - d = os.path.normpath(os.path.join(d, os.path.pardir)) - - # Make sure we are making progress and aren't in an - # infinite loop. - # - # (XXX - Need to deal with symlinks here too) - if d == oldpath: - raise apx.ImageNotFoundException( - exact_match, startd, d) - - def __load_config(self): - """Load this image's cached configuration from the default - location. This function should not be called anywhere other - than __set_dirs().""" - - # XXX Incomplete with respect to doc/image.txt description of - # configuration. - - if self.root == None: - raise RuntimeError("self.root must be set") - - version = None - if self.version > -1: - if self.version >= 3: - # Configuration version is currently 3 - # for all v3 images and newer. - version = 3 - else: - version = self.version - - self.cfg = imageconfig.ImageConfig(self.__cfgpathname, - self.root, version=version, - overrides=self.__property_overrides) - - if self.__upgraded: - self.cfg = imageconfig.BlendedConfig(self.cfg, - self.get_catalog(self.IMG_CATALOG_INSTALLED).\ - get_package_counts_by_pub(), - self.imgdir, self.transport, - self.cfg.get_policy("use-system-repo")) - - if self.cfg.version == imageconfig.CURRENT_VERSION: - for keyf in self.get_property(imageconfig.KEY_FILES): - if not os.path.exists( - self.root + os.path.sep + keyf): - raise apx.ImageMissingKeyFile(keyf) + # We store certificates internally by + # the SHA-1 hash of its subject. + s = hashlib.sha1( + misc.force_bytes(trusted_ca.subject) + ).hexdigest() + self.__trust_anchors.setdefault(s, []) + self.__trust_anchors[s].append(trusted_ca) + for s in pkg_trust_anchors: + if s not in self.__trust_anchors: + self.__trust_anchors[s] = pkg_trust_anchors[s] + return self.__trust_anchors + + @property + def bad_trust_anchors(self): + """A list of strings decribing errors encountered while parsing + trust anchors.""" + + return [ + _( + "{path} is expected to be a certificate but could " + "not be parsed. The error encountered " + "was:\n\t{err}" + ).format(path=p, err=e) + for p, e in self.__bad_trust_anchors + ] + + @property + def write_cache_path(self): + """The path to the filesystem that holds the write cache--used + to compute whether sufficent space is available for + downloads.""" + + return self.__user_cache_dir or os.path.join(self.imgdir, IMG_PUB_DIR) + + @contextmanager + def locked_op(self, op, allow_unprivileged=False, new_history_op=True): + """Helper method for executing an image-modifying operation + that needs locking. It automatically handles calling + log_operation_start and log_operation_end by default. Locking + behaviour is controlled by the blocking_locks image property. + + 'allow_unprivileged' is an optional boolean value indicating + that permissions-related exceptions should be ignored when + attempting to obtain the lock as the related operation will + still work correctly even though the image cannot (presumably) + be modified. + + 'new_history_op' indicates whether we should handle history + operations. + """ - self.__load_publisher_ssl() + error = None + self.lock(allow_unprivileged=allow_unprivileged) + try: + be_name, be_uuid = bootenv.BootEnv.get_be_name(self.root) + if new_history_op: + self.history.log_operation_start( + op, be_name=be_name, be_uuid=be_uuid + ) + yield + except apx.ImageLockedError as e: + # Don't unlock the image if the call failed to + # get the lock. 
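A sketch of the context-manager usage this method supports; the image root and operation name here are only illustrative:

    from pkg.client.image import Image

    img = Image("/", user_provided_dir=True)  # assumes an existing pkg(7) image at /
    with img.locked_op("set-property"):
        pass                                  # image-modifying work; locking and the history
                                              # start/end records are handled by the manager
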
+ error = e + raise + except Exception as e: + error = e + self.unlock() + raise + else: + self.unlock() + finally: + if new_history_op: + self.history.log_operation_end(error=error) + + def lock(self, allow_unprivileged=False): + """Locks the image in preparation for an image-modifying + operation. Raises an ImageLockedError exception on failure. + Locking behaviour is controlled by the blocking_locks image + property. + + 'allow_unprivileged' is an optional boolean value indicating + that permissions-related exceptions should be ignored when + attempting to obtain the lock as the related operation will + still work correctly even though the image cannot (presumably) + be modified. + """ - def __store_publisher_ssl(self): - """Normalizes publisher SSL configuration data, storing any - certificate files as needed in the image's SSL directory. This - logic is performed here in the image instead of ImageConfig as - it relies on special knowledge of the image structure.""" + blocking = self.blocking_locks - ssl_dir = os.path.join(self.imgdir, "ssl") + # First, attempt to obtain a thread lock. + if not self.__lock.acquire(blocking=blocking): + raise apx.ImageLockedError() - def store_ssl_file(src): - try: - if not src or not os.path.exists(src): - # If SSL file doesn't exist (for - # whatever reason), then don't update - # configuration. (Let the failure - # happen later during an operation - # that requires the file.) - return - except EnvironmentError as e: - raise apx._convert_error(e) - - # Ensure ssl_dir exists; makedirs handles any errors. - misc.makedirs(ssl_dir) + try: + # Attempt to obtain a file lock. + self.__lockfile.lock(blocking=blocking) + except EnvironmentError as e: + exc = None + if e.errno == errno.ENOENT: + return + if e.errno == errno.EACCES: + exc = apx.UnprivilegedUserError(e.filename) + elif e.errno == errno.EROFS: + exc = apx.ReadOnlyFileSystemException(e.filename) + elif e.errno in (errno.ENOSPC, errno.EDQUOT): + # Failed due to space constraint + raise e + else: + self.__lock.release() + raise + + if exc and not allow_unprivileged: + self.__lock.release() + raise exc + except: + # If process lock fails, ensure thread lock is released. + self.__lock.release() + raise + + def unlock(self): + """Unlocks the image.""" + + try: + if self.__lockfile: + self.__lockfile.unlock() + finally: + self.__lock.release() + + def image_type(self, d): + """Returns the type of image at directory: d; or None""" + rv = None + + def is_image(sub_d, prefix): + # First check for new image configuration file. + if os.path.isfile(os.path.join(sub_d, prefix, "pkg5.image")): + # Regardless of directory structure, assume + # this is an image for now. + return True - try: - # Destination name is based on digest of file. - # In order for this image to interoperate with - # older and newer clients, we must use sha-1 - # here. - dest = os.path.join(ssl_dir, - misc.get_data_digest(src, - hash_func=hashlib.sha1)[0]) - if src != dest: - portable.copyfile(src, dest) - - # Ensure file can be read by unprivileged users. - os.chmod(dest, misc.PKG_FILE_MODE) - except EnvironmentError as e: - raise apx._convert_error(e) - return dest - - for pub in self.cfg.publishers.values(): - # self.cfg.publishers is used because gen_publishers - # includes temporary publishers and this is only for - # configured ones. - repo = pub.repository - if not repo: - continue + if not os.path.isfile(os.path.join(sub_d, prefix, "cfg_cache")): + # For older formats, if configuration is + # missing, this can't be an image. 
+ return False - # Store and normalize ssl_cert and ssl_key. - for u in repo.origins + repo.mirrors: - for prop in ("ssl_cert", "ssl_key"): - pval = getattr(u, prop) - if pval: - pval = store_ssl_file(pval) - if not pval: - continue - # Store path as absolute to image root, - # it will be corrected on load to match - # actual image location if needed. - setattr(u, prop, - os.path.splitdrive(self.root)[0] + - os.path.sep + - misc.relpath(pval, start=self.root)) - - def __load_publisher_ssl(self): - """Should be called every time image configuration is loaded; - ensure ssl_cert and ssl_key properties of publisher repository - URI objects match current image location.""" - - ssl_dir = os.path.join(self.imgdir, "ssl") - - for pub in self.cfg.publishers.values(): - # self.cfg.publishers is used because gen_publishers - # includes temporary publishers and this is only for - # configured ones. - repo = pub.repository - if not repo: - continue + # Configuration exists, but for older formats, + # all of these directories have to exist. + for n in ("state", "pkg"): + if not os.path.isdir(os.path.join(sub_d, prefix, n)): + return False + + return True + + if os.path.isdir(os.path.join(d, img_user_prefix)) and is_image( + d, img_user_prefix + ): + rv = IMG_USER + elif os.path.isdir(os.path.join(d, img_root_prefix)) and is_image( + d, img_root_prefix + ): + rv = IMG_ENTIRE + return rv + + def find_root(self, d, exact_match=False, progtrack=None): + # Ascend from the given directory d to find first + # encountered image. If exact_match is true, if the + # image found doesn't match startd, raise an + # ImageNotFoundException. + + startd = d + # eliminate problem if relative path such as "." is passed in + d = os.path.realpath(d) + + while True: + imgtype = self.image_type(d) + if imgtype in (IMG_USER, IMG_ENTIRE): + if exact_match and os.path.realpath(startd) != os.path.realpath( + d + ): + raise apx.ImageNotFoundException(exact_match, startd, d) + self.__set_dirs( + imgtype=imgtype, root=d, startd=startd, progtrack=progtrack + ) + return - for u in repo.origins + repo.mirrors: - for prop in ("ssl_cert", "ssl_key"): - pval = getattr(u, prop) - if not pval: - continue - if not os.path.join(self.img_prefix, - "ssl") in os.path.dirname(pval): - continue - # If special image directory is part - # of path, then assume path should be - # rewritten to match current image - # location. - setattr(u, prop, os.path.join(ssl_dir, - os.path.basename(pval))) - - def update_last_modified(self): - """Update $imgdir/modified timestamp for image; should be - called after any image modification has completed. This - provides a public interface for programs that want to monitor - the image for modifications via event ports, etc.""" - - # This is usually /var/pkg/modified. - fname = os.path.join(self.imgdir, "modified") - try: - with os.fdopen( - os.open(fname, os.O_CREAT|os.O_NOFOLLOW, - misc.PKG_FILE_MODE)) as f: - os.utime(fname, None) - except EnvironmentError as e: - raise apx._convert_error(e) + # XXX follow symlinks or not? + oldpath = d + d = os.path.normpath(os.path.join(d, os.path.pardir)) + + # Make sure we are making progress and aren't in an + # infinite loop. + # + # (XXX - Need to deal with symlinks here too) + if d == oldpath: + raise apx.ImageNotFoundException(exact_match, startd, d) + + def __load_config(self): + """Load this image's cached configuration from the default + location. 
This function should not be called anywhere other + than __set_dirs().""" + + # XXX Incomplete with respect to doc/image.txt description of + # configuration. + + if self.root == None: + raise RuntimeError("self.root must be set") + + version = None + if self.version > -1: + if self.version >= 3: + # Configuration version is currently 3 + # for all v3 images and newer. + version = 3 + else: + version = self.version + + self.cfg = imageconfig.ImageConfig( + self.__cfgpathname, + self.root, + version=version, + overrides=self.__property_overrides, + ) + + if self.__upgraded: + self.cfg = imageconfig.BlendedConfig( + self.cfg, + self.get_catalog( + self.IMG_CATALOG_INSTALLED + ).get_package_counts_by_pub(), + self.imgdir, + self.transport, + self.cfg.get_policy("use-system-repo"), + ) + + if self.cfg.version == imageconfig.CURRENT_VERSION: + for keyf in self.get_property(imageconfig.KEY_FILES): + if not os.path.exists(self.root + os.path.sep + keyf): + raise apx.ImageMissingKeyFile(keyf) + + self.__load_publisher_ssl() + + def __store_publisher_ssl(self): + """Normalizes publisher SSL configuration data, storing any + certificate files as needed in the image's SSL directory. This + logic is performed here in the image instead of ImageConfig as + it relies on special knowledge of the image structure.""" + + ssl_dir = os.path.join(self.imgdir, "ssl") + + def store_ssl_file(src): + try: + if not src or not os.path.exists(src): + # If SSL file doesn't exist (for + # whatever reason), then don't update + # configuration. (Let the failure + # happen later during an operation + # that requires the file.) + return + except EnvironmentError as e: + raise apx._convert_error(e) + + # Ensure ssl_dir exists; makedirs handles any errors. + misc.makedirs(ssl_dir) + + try: + # Destination name is based on digest of file. + # In order for this image to interoperate with + # older and newer clients, we must use sha-1 + # here. + dest = os.path.join( + ssl_dir, + misc.get_data_digest(src, hash_func=hashlib.sha1)[0], + ) + if src != dest: + portable.copyfile(src, dest) + + # Ensure file can be read by unprivileged users. + os.chmod(dest, misc.PKG_FILE_MODE) + except EnvironmentError as e: + raise apx._convert_error(e) + return dest + + for pub in self.cfg.publishers.values(): + # self.cfg.publishers is used because gen_publishers + # includes temporary publishers and this is only for + # configured ones. + repo = pub.repository + if not repo: + continue + + # Store and normalize ssl_cert and ssl_key. + for u in repo.origins + repo.mirrors: + for prop in ("ssl_cert", "ssl_key"): + pval = getattr(u, prop) + if pval: + pval = store_ssl_file(pval) + if not pval: + continue + # Store path as absolute to image root, + # it will be corrected on load to match + # actual image location if needed. + setattr( + u, + prop, + os.path.splitdrive(self.root)[0] + + os.path.sep + + misc.relpath(pval, start=self.root), + ) + + def __load_publisher_ssl(self): + """Should be called every time image configuration is loaded; + ensure ssl_cert and ssl_key properties of publisher repository + URI objects match current image location.""" + + ssl_dir = os.path.join(self.imgdir, "ssl") + + for pub in self.cfg.publishers.values(): + # self.cfg.publishers is used because gen_publishers + # includes temporary publishers and this is only for + # configured ones. 
+ repo = pub.repository + if not repo: + continue + + for u in repo.origins + repo.mirrors: + for prop in ("ssl_cert", "ssl_key"): + pval = getattr(u, prop) + if not pval: + continue + if not os.path.join( + self.img_prefix, "ssl" + ) in os.path.dirname(pval): + continue + # If special image directory is part + # of path, then assume path should be + # rewritten to match current image + # location. + setattr( + u, prop, os.path.join(ssl_dir, os.path.basename(pval)) + ) + + def update_last_modified(self): + """Update $imgdir/modified timestamp for image; should be + called after any image modification has completed. This + provides a public interface for programs that want to monitor + the image for modifications via event ports, etc.""" + + # This is usually /var/pkg/modified. + fname = os.path.join(self.imgdir, "modified") + try: + with os.fdopen( + os.open(fname, os.O_CREAT | os.O_NOFOLLOW, misc.PKG_FILE_MODE) + ) as f: + os.utime(fname, None) + except EnvironmentError as e: + raise apx._convert_error(e) + + def save_config(self): + # First, create the image directories if they haven't been, so + # the configuration file can be written. + self.mkdirs() + self.__store_publisher_ssl() + self.cfg.write() + self.update_last_modified() + self.__load_publisher_ssl() + + # Remove the old the pkg.sysrepo(8) cache, if present. + cache_path = os.path.join( + self.root, global_settings.sysrepo_pub_cache_path + ) + try: + portable.remove(cache_path) + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise apx._convert_error(e) + + if self.is_liveroot() and smf.get_state( + "svc:/application/pkg/system-repository:default" + ) in (smf.SMF_SVC_TMP_ENABLED, smf.SMF_SVC_ENABLED): + smf.refresh(["svc:/application/pkg/system-repository:default"]) + + # This ensures all old transport configuration is thrown away. + self.transport = transport.Transport(transport.ImageTransportCfg(self)) + + def hotfix_origin_cleanup(self): + """Remove any temporary hot-fix source origins""" + + changed = False + + for pub in self.cfg.publishers.values(): + if not pub.repository: + continue + + for o in pub.repository.origins: + if relib.search("/pkg_hfa_.*p5p/$", o.uri): + pub.repository.remove_origin(o) + changed = True + + if changed: + self.save_config() + + def mkdirs(self, root=None, version=None): + """Create any missing parts of the image's directory structure. + + 'root' is an optional path to a directory to create the new + image structure in. If not provided, the current image + directory is the default. + + 'version' is an optional integer value indicating the version + of the structure to create. If not provided, the current image + version is the default. + """ - def save_config(self): - # First, create the image directories if they haven't been, so - # the configuration file can be written. - self.mkdirs() - self.__store_publisher_ssl() - self.cfg.write() - self.update_last_modified() - self.__load_publisher_ssl() - - # Remove the old the pkg.sysrepo(8) cache, if present. 
- cache_path = os.path.join(self.root, - global_settings.sysrepo_pub_cache_path) + if not root: + root = self.imgdir + if not version: + version = self.version + + if version == self.CURRENT_VERSION: + img_dirs = [ + "cache/index", + "cache/publisher", + "cache/tmp", + "gui_cache", + "history", + "license", + "lost+found", + "publisher", + "ssl", + "state/installed", + "state/known", + ] + else: + img_dirs = [ + "download", + "file", + "gui_cache", + "history", + "index", + "lost+found", + "pkg", + "publisher", + "state/installed", + "state/known", + "tmp", + ] + + for sd in img_dirs: + try: + misc.makedirs(os.path.join(root, sd)) + except EnvironmentError as e: + raise apx._convert_error(e) + + def __set_dirs( + self, imgtype, root, startd=None, progtrack=None, purge=False + ): + # Ensure upgraded status is reset. + self.__upgraded = False + + if not self.__allow_liveroot() and root == misc.liveroot(): + if startd == None: + startd = root + raise RuntimeError( + "Live root image access is disabled but was \ + attempted.\nliveroot: {0}\nimage path: {1}".format( + misc.liveroot(), startd + ) + ) + + self.__root = root + self.type = imgtype + if self.type == IMG_USER: + self.img_prefix = img_user_prefix + else: + self.img_prefix = img_root_prefix + + # Use a new Transport object every time location is changed. + self.transport = transport.Transport(transport.ImageTransportCfg(self)) + + # cleanup specified path + if os.path.isdir(root): + try: + cwd = os.getcwd() + except Exception as e: + # If current directory can't be obtained for any + # reason, ignore the error. + cwd = None + + try: + os.chdir(root) + self.__root = os.getcwd() + except EnvironmentError as e: + raise apx._convert_error(e) + finally: + if cwd: + os.chdir(cwd) + + # If current image is locked, then it should be unlocked + # and then relocked after the imgdir is changed. This + # ensures that alternate BE scenarios work. + relock = self.imgdir and self.locked + if relock: + self.unlock() + + # Must set imgdir first. + self.__imgdir = os.path.join(self.root, self.img_prefix) + + # Force a reset of version. + self.version = -1 + + # Assume version 4+ configuration location. + self.__cfgpathname = os.path.join(self.imgdir, "pkg5.image") + + # In the case of initial image creation, purge is specified + # to ensure that when an image is created over an existing + # one, any old data is removed first. + if purge and os.path.exists(self.imgdir): + for entry in os.listdir(self.imgdir): + if entry == "ssl": + # Preserve certs and keys directory + # as a special exception. + continue + epath = os.path.join(self.imgdir, entry) try: - portable.remove(cache_path) + if os.path.isdir(epath): + shutil.rmtree(epath) + else: + portable.remove(epath) except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise apx._convert_error(e) - - if self.is_liveroot() and \ - smf.get_state( - "svc:/application/pkg/system-repository:default") in \ - (smf.SMF_SVC_TMP_ENABLED, smf.SMF_SVC_ENABLED): - smf.refresh([ - "svc:/application/pkg/system-repository:default"]) - - # This ensures all old transport configuration is thrown away. 
- self.transport = transport.Transport( - transport.ImageTransportCfg(self)) - - def hotfix_origin_cleanup(self): - """Remove any temporary hot-fix source origins""" - - changed = False - - for pub in self.cfg.publishers.values(): - if not pub.repository: - continue - - for o in pub.repository.origins: - if relib.search('/pkg_hfa_.*p5p/$', o.uri): - pub.repository.remove_origin(o) - changed = True - - if changed: - self.save_config() - - def mkdirs(self, root=None, version=None): - """Create any missing parts of the image's directory structure. - - 'root' is an optional path to a directory to create the new - image structure in. If not provided, the current image - directory is the default. - - 'version' is an optional integer value indicating the version - of the structure to create. If not provided, the current image - version is the default. - """ - - if not root: - root = self.imgdir - if not version: - version = self.version - - if version == self.CURRENT_VERSION: - img_dirs = ["cache/index", "cache/publisher", - "cache/tmp", "gui_cache", "history", "license", - "lost+found", "publisher", "ssl", "state/installed", - "state/known"] - else: - img_dirs = ["download", "file", "gui_cache", "history", - "index", "lost+found", "pkg", "publisher", - "state/installed", "state/known", "tmp"] - - for sd in img_dirs: - try: - misc.makedirs(os.path.join(root, sd)) - except EnvironmentError as e: - raise apx._convert_error(e) - - def __set_dirs(self, imgtype, root, startd=None, progtrack=None, - purge=False): - # Ensure upgraded status is reset. - self.__upgraded = False - - if not self.__allow_liveroot() and root == misc.liveroot(): - if startd == None: - startd = root - raise RuntimeError( - "Live root image access is disabled but was \ - attempted.\nliveroot: {0}\nimage path: {1}".format( - misc.liveroot(), startd)) - - self.__root = root - self.type = imgtype - if self.type == IMG_USER: - self.img_prefix = img_user_prefix - else: - self.img_prefix = img_root_prefix - - # Use a new Transport object every time location is changed. - self.transport = transport.Transport( - transport.ImageTransportCfg(self)) - - # cleanup specified path - if os.path.isdir(root): - try: - cwd = os.getcwd() - except Exception as e: - # If current directory can't be obtained for any - # reason, ignore the error. - cwd = None - - try: - os.chdir(root) - self.__root = os.getcwd() - except EnvironmentError as e: - raise apx._convert_error(e) - finally: - if cwd: - os.chdir(cwd) - - # If current image is locked, then it should be unlocked - # and then relocked after the imgdir is changed. This - # ensures that alternate BE scenarios work. - relock = self.imgdir and self.locked - if relock: - self.unlock() - - # Must set imgdir first. - self.__imgdir = os.path.join(self.root, self.img_prefix) - - # Force a reset of version. + raise apx._convert_error(e) + elif not purge: + # Determine if the version 4 configuration file exists. + if not os.path.exists(self.__cfgpathname): + self.__cfgpathname = os.path.join(self.imgdir, "cfg_cache") + + # Load the image configuration. + self.__load_config() + + if not purge: + try: + self.version = int(self.cfg.get_property("image", "version")) + except (cfg.PropertyConfigError, ValueError): + # If version couldn't be read from + # configuration, then allow fallback + # path below to set things right. self.version = -1 - # Assume version 4+ configuration location. 
- self.__cfgpathname = os.path.join(self.imgdir, "pkg5.image") - - # In the case of initial image creation, purge is specified - # to ensure that when an image is created over an existing - # one, any old data is removed first. - if purge and os.path.exists(self.imgdir): - for entry in os.listdir(self.imgdir): - if entry == "ssl": - # Preserve certs and keys directory - # as a special exception. - continue - epath = os.path.join(self.imgdir, entry) - try: - if os.path.isdir(epath): - shutil.rmtree(epath) - else: - portable.remove(epath) - except EnvironmentError as e: - raise apx._convert_error(e) - elif not purge: - # Determine if the version 4 configuration file exists. - if not os.path.exists(self.__cfgpathname): - self.__cfgpathname = os.path.join(self.imgdir, - "cfg_cache") - - # Load the image configuration. - self.__load_config() - - if not purge: - try: - self.version = int(self.cfg.get_property("image", - "version")) - except (cfg.PropertyConfigError, ValueError): - # If version couldn't be read from - # configuration, then allow fallback - # path below to set things right. - self.version = -1 - - if self.version <= 0: - # If version doesn't exist, attempt to determine version - # based on structure. - pub_root = os.path.join(self.imgdir, IMG_PUB_DIR) - if purge: - # This is a new image. - self.version = self.CURRENT_VERSION - elif os.path.exists(pub_root): - cache_root = os.path.join(self.imgdir, "cache") - if os.path.exists(cache_root): - # The image must be corrupted, as the - # version should have been loaded from - # configuration. For now, raise an - # exception. In the future, this - # behaviour should probably be optional - # so that pkg fix or pkg verify can - # still use the image. - raise apx.UnsupportedImageError( - self.root) - else: - # Assume version 3 image. - self.version = 3 - - # Reload image configuration again now that - # version has been determined so that property - # definitions match. - self.__load_config() - elif os.path.exists(os.path.join(self.imgdir, - "catalog")): - self.version = 2 - - # Reload image configuration again now that - # version has been determined so that property - # definitions match. - self.__load_config() - else: - # Format is too old or invalid. - raise apx.UnsupportedImageError(self.root) - - if self.version > self.CURRENT_VERSION or self.version < 2: - # Image is too new or too old. - raise apx.UnsupportedImageError(self.root) - - # Ensure image version matches determined one; this must - # be set *after* the version checks above. - self.cfg.set_property("image", "version", self.version) - - # Remaining dirs may now be set. - if self.version == self.CURRENT_VERSION: - self.__tmpdir = os.path.join(self.imgdir, "cache", - "tmp") - else: - self.__tmpdir = os.path.join(self.imgdir, "tmp") - self._statedir = os.path.join(self.imgdir, "state") - self.plandir = os.path.join(self.__tmpdir, "plan") - self.update_index_dir() - - self.history.root_dir = self.imgdir - self.__lockfile = lockfile.LockFile(os.path.join(self.imgdir, - "lock"), set_lockstr=lockfile.client_lock_set_str, - get_lockstr=lockfile.client_lock_get_str, - failure_exc=apx.ImageLockedError, - provide_mutex=False) - - if relock: - self.lock() - - # Setup cache directories. - self.__read_cache_dirs = [] - self._incoming_cache_dir = None - self.__user_cache_dir = None - self.__write_cache_dir = None - self.__write_cache_root = None - # The user specified cache is used as an additional place to - # read cache data from, but as the only place to store new - # cache data. 
- if "PKG_CACHEROOT" in os.environ: - # If set, cache is structured like /var/pkg/publisher. - # get_cachedirs() will build paths for each publisher's - # cache using this directory. - self.__user_cache_dir = os.path.normpath( - os.environ["PKG_CACHEROOT"]) - self.__write_cache_root = self.__user_cache_dir - elif "PKG_CACHEDIR" in os.environ: - # If set, cache is a flat structure that is used for - # all publishers. - self.__user_cache_dir = os.path.normpath( - os.environ["PKG_CACHEDIR"]) - self.__write_cache_dir = self.__user_cache_dir - # Since the cache structure is flat, add it to the - # list of global read caches. - self.__read_cache_dirs.append(self.__user_cache_dir) - if self.__user_cache_dir: - self._incoming_cache_dir = os.path.join( - self.__user_cache_dir, - "incoming-{0:d}".format(os.getpid())) - - if self.version < 4: - self.__action_cache_dir = self.temporary_dir() + if self.version <= 0: + # If version doesn't exist, attempt to determine version + # based on structure. + pub_root = os.path.join(self.imgdir, IMG_PUB_DIR) + if purge: + # This is a new image. + self.version = self.CURRENT_VERSION + elif os.path.exists(pub_root): + cache_root = os.path.join(self.imgdir, "cache") + if os.path.exists(cache_root): + # The image must be corrupted, as the + # version should have been loaded from + # configuration. For now, raise an + # exception. In the future, this + # behaviour should probably be optional + # so that pkg fix or pkg verify can + # still use the image. + raise apx.UnsupportedImageError(self.root) else: - self.__action_cache_dir = os.path.join(self.imgdir, - "cache") - - if self.version < 4: - if not self.__user_cache_dir: - self.__write_cache_dir = os.path.join( - self.imgdir, "download") - self._incoming_cache_dir = os.path.join( - self.__write_cache_dir, - "incoming-{0:d}".format(os.getpid())) - self.__read_cache_dirs.append(os.path.normpath( - os.path.join(self.imgdir, "download"))) - elif not self._incoming_cache_dir: - # Only a global incoming cache exists for newer images. - self._incoming_cache_dir = os.path.join(self.imgdir, - "cache", "incoming-{0:d}".format(os.getpid())) - - # Test if we have the permissions to create the cache - # incoming directory in this hierarchy. If not, we'll need to - # move it somewhere else. - try: - os.makedirs(self._incoming_cache_dir) - except EnvironmentError as e: - if e.errno == errno.EACCES or e.errno == errno.EROFS: - self.__write_cache_dir = tempfile.mkdtemp( - prefix="download-{0:d}-".format( - os.getpid())) - self._incoming_cache_dir = os.path.normpath( - os.path.join(self.__write_cache_dir, - "incoming-{0:d}".format(os.getpid()))) - self.__read_cache_dirs.append( - self.__write_cache_dir) - # There's no image cleanup hook, so we'll just - # remove this directory on process exit. - atexit.register(shutil.rmtree, - self.__write_cache_dir, ignore_errors=True) - else: - os.rmdir(self._incoming_cache_dir) - - # Forcibly discard image catalogs so they can be re-loaded - # from the new location if they are already loaded. This - # also prevents scribbling on image state information in - # the wrong location. - self.__init_catalogs() - - # Upgrade the image's format if needed. - self.update_format(allow_unprivileged=True, - progtrack=progtrack) - - # If we haven't loaded the system publisher configuration, do - # that now. 
- if isinstance(self.cfg, imageconfig.ImageConfig): - self.cfg = imageconfig.BlendedConfig(self.cfg, - self.get_catalog(self.IMG_CATALOG_INSTALLED).\ - get_package_counts_by_pub(), - self.imgdir, self.transport, - self.cfg.get_policy("use-system-repo")) - - # Check to see if any system publishers have been changed. - # If so they need to be refreshed, so clear last_refreshed. - for p in self.cfg.modified_pubs: - p.meta_root = self._get_publisher_meta_root(p.prefix) - p.last_refreshed = None - - # Check to see if any system publishers have been - # removed. If they have, remove their metadata and - # rebuild the catalogs. - changed = False - for p in self.cfg.removed_pubs: - p.meta_root = self._get_publisher_meta_root(p.prefix) - try: - self.remove_publisher_metadata(p, rebuild=False) - changed = True - except apx.PermissionsException: - pass - if changed: - self.__rebuild_image_catalogs() - - # we delay writing out any new system repository configuration - # until we've updated on on-disk catalog state. (otherwise we - # could lose track of syspub publishers changes and either - # return stale catalog information, or not do refreshes when - # we need to.) - self.cfg.write_sys_cfg() - - self.__load_publisher_ssl() - if purge: - # Configuration shouldn't be written again unless this - # is an image creation operation (hence the purge). - self.save_config() + # Assume version 3 image. + self.version = 3 - # Let the linked image subsystem know that root is moving - self.linked._init_root() - - # load image avoid pkg set - self.__avoid_set_load() - - def update_format(self, allow_unprivileged=False, progtrack=None): - """Transform the existing image structure and its data to - the newest format. Callers are responsible for locking. - - 'allow_unprivileged' is an optional boolean indicating - whether a fallback to an in-memory only upgrade should - be performed if a PermissionsException is encountered - during the operation. + # Reload image configuration again now that + # version has been determined so that property + # definitions match. + self.__load_config() + elif os.path.exists(os.path.join(self.imgdir, "catalog")): + self.version = 2 - 'progtrack' is an optional ProgressTracker object. - """ + # Reload image configuration again now that + # version has been determined so that property + # definitions match. + self.__load_config() + else: + # Format is too old or invalid. + raise apx.UnsupportedImageError(self.root) + + if self.version > self.CURRENT_VERSION or self.version < 2: + # Image is too new or too old. + raise apx.UnsupportedImageError(self.root) + + # Ensure image version matches determined one; this must + # be set *after* the version checks above. + self.cfg.set_property("image", "version", self.version) + + # Remaining dirs may now be set. + if self.version == self.CURRENT_VERSION: + self.__tmpdir = os.path.join(self.imgdir, "cache", "tmp") + else: + self.__tmpdir = os.path.join(self.imgdir, "tmp") + self._statedir = os.path.join(self.imgdir, "state") + self.plandir = os.path.join(self.__tmpdir, "plan") + self.update_index_dir() + + self.history.root_dir = self.imgdir + self.__lockfile = lockfile.LockFile( + os.path.join(self.imgdir, "lock"), + set_lockstr=lockfile.client_lock_set_str, + get_lockstr=lockfile.client_lock_get_str, + failure_exc=apx.ImageLockedError, + provide_mutex=False, + ) + + if relock: + self.lock() + + # Setup cache directories. 
+ self.__read_cache_dirs = [] + self._incoming_cache_dir = None + self.__user_cache_dir = None + self.__write_cache_dir = None + self.__write_cache_root = None + # The user specified cache is used as an additional place to + # read cache data from, but as the only place to store new + # cache data. + if "PKG_CACHEROOT" in os.environ: + # If set, cache is structured like /var/pkg/publisher. + # get_cachedirs() will build paths for each publisher's + # cache using this directory. + self.__user_cache_dir = os.path.normpath( + os.environ["PKG_CACHEROOT"] + ) + self.__write_cache_root = self.__user_cache_dir + elif "PKG_CACHEDIR" in os.environ: + # If set, cache is a flat structure that is used for + # all publishers. + self.__user_cache_dir = os.path.normpath(os.environ["PKG_CACHEDIR"]) + self.__write_cache_dir = self.__user_cache_dir + # Since the cache structure is flat, add it to the + # list of global read caches. + self.__read_cache_dirs.append(self.__user_cache_dir) + if self.__user_cache_dir: + self._incoming_cache_dir = os.path.join( + self.__user_cache_dir, "incoming-{0:d}".format(os.getpid()) + ) + + if self.version < 4: + self.__action_cache_dir = self.temporary_dir() + else: + self.__action_cache_dir = os.path.join(self.imgdir, "cache") + + if self.version < 4: + if not self.__user_cache_dir: + self.__write_cache_dir = os.path.join(self.imgdir, "download") + self._incoming_cache_dir = os.path.join( + self.__write_cache_dir, "incoming-{0:d}".format(os.getpid()) + ) + self.__read_cache_dirs.append( + os.path.normpath(os.path.join(self.imgdir, "download")) + ) + elif not self._incoming_cache_dir: + # Only a global incoming cache exists for newer images. + self._incoming_cache_dir = os.path.join( + self.imgdir, "cache", "incoming-{0:d}".format(os.getpid()) + ) + + # Test if we have the permissions to create the cache + # incoming directory in this hierarchy. If not, we'll need to + # move it somewhere else. + try: + os.makedirs(self._incoming_cache_dir) + except EnvironmentError as e: + if e.errno == errno.EACCES or e.errno == errno.EROFS: + self.__write_cache_dir = tempfile.mkdtemp( + prefix="download-{0:d}-".format(os.getpid()) + ) + self._incoming_cache_dir = os.path.normpath( + os.path.join( + self.__write_cache_dir, + "incoming-{0:d}".format(os.getpid()), + ) + ) + self.__read_cache_dirs.append(self.__write_cache_dir) + # There's no image cleanup hook, so we'll just + # remove this directory on process exit. + atexit.register( + shutil.rmtree, self.__write_cache_dir, ignore_errors=True + ) + else: + os.rmdir(self._incoming_cache_dir) + + # Forcibly discard image catalogs so they can be re-loaded + # from the new location if they are already loaded. This + # also prevents scribbling on image state information in + # the wrong location. + self.__init_catalogs() + + # Upgrade the image's format if needed. + self.update_format(allow_unprivileged=True, progtrack=progtrack) + + # If we haven't loaded the system publisher configuration, do + # that now. + if isinstance(self.cfg, imageconfig.ImageConfig): + self.cfg = imageconfig.BlendedConfig( + self.cfg, + self.get_catalog( + self.IMG_CATALOG_INSTALLED + ).get_package_counts_by_pub(), + self.imgdir, + self.transport, + self.cfg.get_policy("use-system-repo"), + ) + + # Check to see if any system publishers have been changed. + # If so they need to be refreshed, so clear last_refreshed. 
+ for p in self.cfg.modified_pubs: + p.meta_root = self._get_publisher_meta_root(p.prefix) + p.last_refreshed = None + + # Check to see if any system publishers have been + # removed. If they have, remove their metadata and + # rebuild the catalogs. + changed = False + for p in self.cfg.removed_pubs: + p.meta_root = self._get_publisher_meta_root(p.prefix) + try: + self.remove_publisher_metadata(p, rebuild=False) + changed = True + except apx.PermissionsException: + pass + if changed: + self.__rebuild_image_catalogs() + + # we delay writing out any new system repository configuration + # until we've updated on on-disk catalog state. (otherwise we + # could lose track of syspub publishers changes and either + # return stale catalog information, or not do refreshes when + # we need to.) + self.cfg.write_sys_cfg() + + self.__load_publisher_ssl() + if purge: + # Configuration shouldn't be written again unless this + # is an image creation operation (hence the purge). + self.save_config() + + # Let the linked image subsystem know that root is moving + self.linked._init_root() + + # load image avoid pkg set + self.__avoid_set_load() + + def update_format(self, allow_unprivileged=False, progtrack=None): + """Transform the existing image structure and its data to + the newest format. Callers are responsible for locking. + + 'allow_unprivileged' is an optional boolean indicating + whether a fallback to an in-memory only upgrade should + be performed if a PermissionsException is encountered + during the operation. + + 'progtrack' is an optional ProgressTracker object. + """ - if self.version == self.CURRENT_VERSION: - # Already upgraded. - self.__upgraded = True - - # If pre-upgrade data still exists; fire off a - # process to dump it so execution can continue. - orig_root = self.imgdir + ".old" - nullf = open(os.devnull, "w") - if os.path.exists(orig_root): - # Ensure all output is discarded; it really - # doesn't matter if this succeeds. - cmdargs = ["/usr/bin/rm", "-rf", orig_root] - subprocess.Popen(cmdargs, stdout=nullf, - stderr=nullf) - nullf.close() - return False - - if not progtrack: - progtrack = progress.NullProgressTracker() - - # Not technically 'caching', but close enough ... - progtrack.cache_catalogs_start() - - # Upgrade catalog data if needed. - self.__upgrade_catalogs() - - # Data conversion finished. - self.__upgraded = True - - # Determine if on-disk portion of the upgrade is allowed. - if self.allow_ondisk_upgrade == False: - return True - - if self.allow_ondisk_upgrade is None and self.type != IMG_USER: - if not self.is_liveroot() and not self.is_zone(): - # By default, don't update image format if it - # is not the live root, and is not for a zone. - self.allow_ondisk_upgrade = False - return True - - # The logic to perform the on-disk upgrade is in its own - # function so that it can easily be wrapped with locking logic. - with self.locked_op("update-format", - allow_unprivileged=allow_unprivileged): - self.__upgrade_image_format(progtrack, - allow_unprivileged=allow_unprivileged) - - progtrack.cache_catalogs_done() + if self.version == self.CURRENT_VERSION: + # Already upgraded. + self.__upgraded = True + + # If pre-upgrade data still exists; fire off a + # process to dump it so execution can continue. + orig_root = self.imgdir + ".old" + nullf = open(os.devnull, "w") + if os.path.exists(orig_root): + # Ensure all output is discarded; it really + # doesn't matter if this succeeds. 
+ cmdargs = ["/usr/bin/rm", "-rf", orig_root] + subprocess.Popen(cmdargs, stdout=nullf, stderr=nullf) + nullf.close() + return False + + if not progtrack: + progtrack = progress.NullProgressTracker() + + # Not technically 'caching', but close enough ... + progtrack.cache_catalogs_start() + + # Upgrade catalog data if needed. + self.__upgrade_catalogs() + + # Data conversion finished. + self.__upgraded = True + + # Determine if on-disk portion of the upgrade is allowed. + if self.allow_ondisk_upgrade == False: + return True + + if self.allow_ondisk_upgrade is None and self.type != IMG_USER: + if not self.is_liveroot() and not self.is_zone(): + # By default, don't update image format if it + # is not the live root, and is not for a zone. + self.allow_ondisk_upgrade = False return True - def __upgrade_catalogs(self): - """Private helper function for update_format.""" - - if self.version >= 3: - # Nothing to do. - return - - def installed_file_publisher(filepath): - """Find the pkg's installed file named by filepath. - Return the publisher that installed this package.""" + # The logic to perform the on-disk upgrade is in its own + # function so that it can easily be wrapped with locking logic. + with self.locked_op( + "update-format", allow_unprivileged=allow_unprivileged + ): + self.__upgrade_image_format( + progtrack, allow_unprivileged=allow_unprivileged + ) + + progtrack.cache_catalogs_done() + return True + + def __upgrade_catalogs(self): + """Private helper function for update_format.""" + + if self.version >= 3: + # Nothing to do. + return + + def installed_file_publisher(filepath): + """Find the pkg's installed file named by filepath. + Return the publisher that installed this package.""" + + f = open(filepath) + try: + flines = f.readlines() + version, pub = flines + version = version.strip() + pub = pub.strip() + f.close() + except ValueError: + # If ValueError occurs, the installed file is of + # a previous format. For upgrades to work, it's + # necessary to assume that the package was + # installed from the highest ranked publisher. + # Here, the publisher is setup to record that. + if flines: + pub = flines[0] + pub = pub.strip() + newpub = "{0}_{1}".format(pkg.fmri.PREF_PUB_PFX, pub) + else: + newpub = "{0}_{1}".format( + pkg.fmri.PREF_PUB_PFX, + self.get_highest_ranked_publisher(), + ) + pub = newpub + assert pub + return pub + + # First, load the old package state information. + installed_state_dir = "{0}/state/installed".format(self.imgdir) + + # If the state directory structure has already been created, + # loading information from it is fast. The directory is + # populated with files, named by their (url-encoded) FMRI, + # which point to the "installed" file in the corresponding + # directory under /var/pkg. + installed = {} + + def add_installed_entry(f): + path = "{0}/pkg/{1}/installed".format(self.imgdir, f.get_dir_path()) + pub = installed_file_publisher(path) + f.set_publisher(pub) + installed[f.pkg_name] = f + + for pl in os.listdir(installed_state_dir): + fmristr = "{0}".format(unquote(pl)) + f = pkg.fmri.PkgFmri(fmristr) + add_installed_entry(f) + + # Create the new image catalogs. + kcat = pkg.catalog.Catalog( + batch_mode=True, manifest_cb=self._manifest_cb, sign=False + ) + icat = pkg.catalog.Catalog( + batch_mode=True, manifest_cb=self._manifest_cb, sign=False + ) + + # XXX For backwards compatibility, 'upgradability' of packages + # is calculated and stored based on whether a given pkg stem + # matches the newest version in the catalog. 
This is quite + # expensive (due to overhead), but at least the cost is + # consolidated here. This comparison is also cross-publisher, + # as it used to be. + newest = {} + old_pub_cats = [] + for pub in self.gen_publishers(): + try: + old_cat = pkg.server.catalog.ServerCatalog( + pub.meta_root, read_only=True, publisher=pub.prefix + ) - f = open(filepath) - try: - flines = f.readlines() - version, pub = flines - version = version.strip() - pub = pub.strip() - f.close() - except ValueError: - # If ValueError occurs, the installed file is of - # a previous format. For upgrades to work, it's - # necessary to assume that the package was - # installed from the highest ranked publisher. - # Here, the publisher is setup to record that. - if flines: - pub = flines[0] - pub = pub.strip() - newpub = "{0}_{1}".format( - pkg.fmri.PREF_PUB_PFX, pub) - else: - newpub = "{0}_{1}".format( - pkg.fmri.PREF_PUB_PFX, - self.get_highest_ranked_publisher()) - pub = newpub - assert pub - return pub - - # First, load the old package state information. - installed_state_dir = "{0}/state/installed".format(self.imgdir) - - # If the state directory structure has already been created, - # loading information from it is fast. The directory is - # populated with files, named by their (url-encoded) FMRI, - # which point to the "installed" file in the corresponding - # directory under /var/pkg. - installed = {} - def add_installed_entry(f): - path = "{0}/pkg/{1}/installed".format( - self.imgdir, f.get_dir_path()) - pub = installed_file_publisher(path) - f.set_publisher(pub) - installed[f.pkg_name] = f - - for pl in os.listdir(installed_state_dir): - fmristr = "{0}".format(unquote(pl)) - f = pkg.fmri.PkgFmri(fmristr) - add_installed_entry(f) - - # Create the new image catalogs. - kcat = pkg.catalog.Catalog(batch_mode=True, - manifest_cb=self._manifest_cb, sign=False) - icat = pkg.catalog.Catalog(batch_mode=True, - manifest_cb=self._manifest_cb, sign=False) - - # XXX For backwards compatibility, 'upgradability' of packages - # is calculated and stored based on whether a given pkg stem - # matches the newest version in the catalog. This is quite - # expensive (due to overhead), but at least the cost is - # consolidated here. This comparison is also cross-publisher, - # as it used to be. - newest = {} - old_pub_cats = [] - for pub in self.gen_publishers(): - try: - old_cat = pkg.server.catalog.ServerCatalog( - pub.meta_root, read_only=True, - publisher=pub.prefix) - - old_pub_cats.append((pub, old_cat)) - for f in old_cat.fmris(): - nver = newest.get(f.pkg_name, None) - newest[f.pkg_name] = max(nver, - f.version) - - except EnvironmentError as e: - # If a catalog file is just missing, ignore it. - # If there's a worse error, make sure the user - # knows about it. - if e.errno != errno.ENOENT: - raise - - # Next, load the existing catalog data and convert it. - pub_cats = [] - for pub, old_cat in old_pub_cats: - new_cat = pub.catalog - new_cat.batch_mode = True - new_cat.sign = False - if new_cat.exists: - new_cat.destroy() - - # First convert the old publisher catalog to - # the new format. - for f in old_cat.fmris(): - new_cat.add_package(f) - - # Now populate the image catalogs. 
- states = [pkgdefs.PKG_STATE_KNOWN, - pkgdefs.PKG_STATE_V0] - mdata = { "states": states } - if f.version != newest[f.pkg_name]: - states.append( - pkgdefs.PKG_STATE_UPGRADABLE) - - inst_fmri = installed.get(f.pkg_name, None) - if inst_fmri and \ - inst_fmri.version == f.version and \ - pkg.fmri.is_same_publisher(f.publisher, - inst_fmri.publisher): - states.append( - pkgdefs.PKG_STATE_INSTALLED) - if inst_fmri.preferred_publisher(): - # Strip the PREF_PUB_PFX. - inst_fmri.set_publisher( - inst_fmri.get_publisher()) - icat.add_package(f, metadata=mdata) - del installed[f.pkg_name] - kcat.add_package(f, metadata=mdata) - - # Normally, the Catalog's attributes are automatically - # populated as a result of catalog operations. But in - # this case, the new Catalog's attributes should match - # those of the old catalog. - old_lm = old_cat.last_modified() - if old_lm: - # Can be None for empty v0 catalogs. - old_lm = pkg.catalog.ts_to_datetime(old_lm) - new_cat.last_modified = old_lm - new_cat.version = 0 - - # Add to the list of catalogs to save. - new_cat.batch_mode = False - pub_cats.append(new_cat) - - # Discard the old catalog objects. - old_pub_cats = None - - for f in installed.values(): - # Any remaining FMRIs need to be added to all of the - # image catalogs. - states = [pkgdefs.PKG_STATE_INSTALLED, - pkgdefs.PKG_STATE_V0] - mdata = { "states": states } - # This package may be installed from a publisher that - # is no longer known or has been disabled. - if f.pkg_name in newest and \ - f.version != newest[f.pkg_name]: - states.append(pkgdefs.PKG_STATE_UPGRADABLE) - - if f.preferred_publisher(): - # Strip the PREF_PUB_PFX. - f.set_publisher(f.get_publisher()) - - icat.add_package(f, metadata=mdata) - kcat.add_package(f, metadata=mdata) - - for cat in pub_cats + [kcat, icat]: - cat.finalize() - - # Cache converted catalogs so that operations can function as - # expected if the on-disk format of the catalogs isn't upgraded. - self.__catalogs[self.IMG_CATALOG_KNOWN] = kcat - self.__catalogs[self.IMG_CATALOG_INSTALLED] = icat - - def __upgrade_image_format(self, progtrack, allow_unprivileged=False): - """Private helper function for update_format.""" + old_pub_cats.append((pub, old_cat)) + for f in old_cat.fmris(): + nver = newest.get(f.pkg_name, None) + newest[f.pkg_name] = max(nver, f.version) + + except EnvironmentError as e: + # If a catalog file is just missing, ignore it. + # If there's a worse error, make sure the user + # knows about it. + if e.errno != errno.ENOENT: + raise + + # Next, load the existing catalog data and convert it. + pub_cats = [] + for pub, old_cat in old_pub_cats: + new_cat = pub.catalog + new_cat.batch_mode = True + new_cat.sign = False + if new_cat.exists: + new_cat.destroy() + + # First convert the old publisher catalog to + # the new format. + for f in old_cat.fmris(): + new_cat.add_package(f) + + # Now populate the image catalogs. + states = [pkgdefs.PKG_STATE_KNOWN, pkgdefs.PKG_STATE_V0] + mdata = {"states": states} + if f.version != newest[f.pkg_name]: + states.append(pkgdefs.PKG_STATE_UPGRADABLE) + + inst_fmri = installed.get(f.pkg_name, None) + if ( + inst_fmri + and inst_fmri.version == f.version + and pkg.fmri.is_same_publisher( + f.publisher, inst_fmri.publisher + ) + ): + states.append(pkgdefs.PKG_STATE_INSTALLED) + if inst_fmri.preferred_publisher(): + # Strip the PREF_PUB_PFX. 
+ inst_fmri.set_publisher(inst_fmri.get_publisher()) + icat.add_package(f, metadata=mdata) + del installed[f.pkg_name] + kcat.add_package(f, metadata=mdata) + + # Normally, the Catalog's attributes are automatically + # populated as a result of catalog operations. But in + # this case, the new Catalog's attributes should match + # those of the old catalog. + old_lm = old_cat.last_modified() + if old_lm: + # Can be None for empty v0 catalogs. + old_lm = pkg.catalog.ts_to_datetime(old_lm) + new_cat.last_modified = old_lm + new_cat.version = 0 + + # Add to the list of catalogs to save. + new_cat.batch_mode = False + pub_cats.append(new_cat) + + # Discard the old catalog objects. + old_pub_cats = None + + for f in installed.values(): + # Any remaining FMRIs need to be added to all of the + # image catalogs. + states = [pkgdefs.PKG_STATE_INSTALLED, pkgdefs.PKG_STATE_V0] + mdata = {"states": states} + # This package may be installed from a publisher that + # is no longer known or has been disabled. + if f.pkg_name in newest and f.version != newest[f.pkg_name]: + states.append(pkgdefs.PKG_STATE_UPGRADABLE) + + if f.preferred_publisher(): + # Strip the PREF_PUB_PFX. + f.set_publisher(f.get_publisher()) + + icat.add_package(f, metadata=mdata) + kcat.add_package(f, metadata=mdata) + + for cat in pub_cats + [kcat, icat]: + cat.finalize() + + # Cache converted catalogs so that operations can function as + # expected if the on-disk format of the catalogs isn't upgraded. + self.__catalogs[self.IMG_CATALOG_KNOWN] = kcat + self.__catalogs[self.IMG_CATALOG_INSTALLED] = icat + + def __upgrade_image_format(self, progtrack, allow_unprivileged=False): + """Private helper function for update_format.""" + + try: + # Ensure Image directory structure is valid. + self.mkdirs() + except apx.PermissionsException as e: + if not allow_unprivileged: + raise + # An unprivileged user is attempting to use the + # new client with an old image. Since none of + # the changes can be saved, warn the user and + # then return. + # + # Raising an exception here would be a decidedly + # bad thing as it would disrupt find_root, etc. + return + + # This has to be done after the permissions check above. + # First, create a new temporary root to store the converted + # image metadata. + tmp_root = self.imgdir + ".new" + try: + shutil.rmtree(tmp_root) + except EnvironmentError as e: + if e.errno in (errno.EROFS, errno.EPERM) and allow_unprivileged: + # Bail. + return + if e.errno != errno.ENOENT: + raise apx._convert_error(e) + + try: + self.mkdirs(root=tmp_root, version=self.CURRENT_VERSION) + except apx.PermissionsException as e: + # Same handling needed as above; but not after this. + if not allow_unprivileged: + raise + return + + def linktree(src_root, dest_root): + if not os.path.exists(src_root): + # Nothing to do. + return + for entry in os.listdir(src_root): + src = os.path.join(src_root, entry) + dest = os.path.join(dest_root, entry) + if os.path.isdir(src): + # Recurse into directory to link + # its contents. + misc.makedirs(dest) + linktree(src, dest) + continue + # Link source file into target dest. + assert os.path.isfile(src) try: - # Ensure Image directory structure is valid. - self.mkdirs() - except apx.PermissionsException as e: - if not allow_unprivileged: - raise - # An unprivileged user is attempting to use the - # new client with an old image. Since none of - # the changes can be saved, warn the user and - # then return. 
- # - # Raising an exception here would be a decidedly - # bad thing as it would disrupt find_root, etc. - return - - # This has to be done after the permissions check above. - # First, create a new temporary root to store the converted - # image metadata. - tmp_root = self.imgdir + ".new" - try: - shutil.rmtree(tmp_root) + os.link(src, dest) except EnvironmentError as e: - if e.errno in (errno.EROFS, errno.EPERM) and \ - allow_unprivileged: - # Bail. - return - if e.errno != errno.ENOENT: - raise apx._convert_error(e) - - try: - self.mkdirs(root=tmp_root, version=self.CURRENT_VERSION) - except apx.PermissionsException as e: - # Same handling needed as above; but not after this. - if not allow_unprivileged: - raise - return - - def linktree(src_root, dest_root): - if not os.path.exists(src_root): - # Nothing to do. - return - - for entry in os.listdir(src_root): - src = os.path.join(src_root, entry) - dest = os.path.join(dest_root, entry) - if os.path.isdir(src): - # Recurse into directory to link - # its contents. - misc.makedirs(dest) - linktree(src, dest) - continue - # Link source file into target dest. - assert os.path.isfile(src) - try: - os.link(src, dest) - except EnvironmentError as e: - raise apx._convert_error(e) - - # Next, link history data into place. - linktree(self.history.path, os.path.join(tmp_root, - "history")) - - # Next, link index data into place. - linktree(self.index_dir, os.path.join(tmp_root, - "cache", "index")) - - # Next, link ssl data into place. - linktree(os.path.join(self.imgdir, "ssl"), - os.path.join(tmp_root, "ssl")) - - # Next, write state data into place. - if self.version < 3: - # Image state and publisher metadata - tmp_state_root = os.path.join(tmp_root, "state") - - # Update image catalog locations. - kcat = self.get_catalog(self.IMG_CATALOG_KNOWN) - icat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - kcat.meta_root = os.path.join(tmp_state_root, - self.IMG_CATALOG_KNOWN) - icat.meta_root = os.path.join(tmp_state_root, - self.IMG_CATALOG_INSTALLED) - - # Assume that since mkdirs succeeded that the remaining - # data can be saved and the image structure can be - # upgraded. But first, attempt to save the image - # catalogs. - for cat in icat, kcat: - misc.makedirs(cat.meta_root) - cat.save() - else: - # For version 3 and newer images, just link existing - # state information into place. - linktree(self._statedir, os.path.join(tmp_root, - "state")) - - # Reset each publisher's meta_root and ensure its complete - # directory structure is intact. Then either link in or - # write out the metadata for each publisher. - for pub in self.gen_publishers(): - old_root = pub.meta_root - old_cat_root = pub.catalog_root - old_cert_root = pub.cert_root - pub.meta_root = os.path.join(tmp_root, - IMG_PUB_DIR, pub.prefix) - pub.create_meta_root() - - if self.version < 3: - # Should be loaded in memory and transformed - # already, so just need to be written out. - pub.catalog.save() - continue - - # Now link any catalog or cert files from the old root - # into the new root. - linktree(old_cat_root, pub.catalog_root) - linktree(old_cert_root, pub.cert_root) - - # Finally, create a directory for the publisher's - # manifests to live in. - misc.makedirs(os.path.join(pub.meta_root, "pkg")) - - # Next, link licenses and manifests of installed packages into - # new image dir. - for pfmri in self.gen_installed_pkgs(): - # Link licenses. 
- mdir = self.get_manifest_dir(pfmri) - for entry in os.listdir(mdir): - if not entry.startswith("license."): - continue - src = os.path.join(mdir, entry) - if os.path.isdir(src): - # Ignore broken licenses. - continue - - # For conversion, ensure destination link uses - # encoded license name to match how new image - # format stores licenses. - dest = os.path.join(tmp_root, "license", - pfmri.get_dir_path(stemonly=True), - quote(entry, "")) - misc.makedirs(os.path.dirname(dest)) - try: - os.link(src, dest) - except EnvironmentError as e: - raise apx._convert_error(e) - - # Link manifest. - src = self.get_manifest_path(pfmri) - dest = os.path.join(tmp_root, "publisher", - pfmri.publisher, "pkg", pfmri.get_dir_path()) - misc.makedirs(os.path.dirname(dest)) - try: - os.link(src, dest) - except EnvironmentError as e: - raise apx._convert_error(e) - - # Next, copy the old configuration into the new location using - # the new name. The configuration is copied instead of being - # linked so that any changes to configuration as a result of - # the upgrade won't be written into the old image directory. - src = os.path.join(self.imgdir, "disabled_auth") - if os.path.exists(src): - dest = os.path.join(tmp_root, "disabled_auth") - portable.copyfile(src, dest) - - src = self.cfg.target - dest = os.path.join(tmp_root, "pkg5.image") + raise apx._convert_error(e) + + # Next, link history data into place. + linktree(self.history.path, os.path.join(tmp_root, "history")) + + # Next, link index data into place. + linktree(self.index_dir, os.path.join(tmp_root, "cache", "index")) + + # Next, link ssl data into place. + linktree( + os.path.join(self.imgdir, "ssl"), os.path.join(tmp_root, "ssl") + ) + + # Next, write state data into place. + if self.version < 3: + # Image state and publisher metadata + tmp_state_root = os.path.join(tmp_root, "state") + + # Update image catalog locations. + kcat = self.get_catalog(self.IMG_CATALOG_KNOWN) + icat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + kcat.meta_root = os.path.join( + tmp_state_root, self.IMG_CATALOG_KNOWN + ) + icat.meta_root = os.path.join( + tmp_state_root, self.IMG_CATALOG_INSTALLED + ) + + # Assume that since mkdirs succeeded that the remaining + # data can be saved and the image structure can be + # upgraded. But first, attempt to save the image + # catalogs. + for cat in icat, kcat: + misc.makedirs(cat.meta_root) + cat.save() + else: + # For version 3 and newer images, just link existing + # state information into place. + linktree(self._statedir, os.path.join(tmp_root, "state")) + + # Reset each publisher's meta_root and ensure its complete + # directory structure is intact. Then either link in or + # write out the metadata for each publisher. + for pub in self.gen_publishers(): + old_root = pub.meta_root + old_cat_root = pub.catalog_root + old_cert_root = pub.cert_root + pub.meta_root = os.path.join(tmp_root, IMG_PUB_DIR, pub.prefix) + pub.create_meta_root() + + if self.version < 3: + # Should be loaded in memory and transformed + # already, so just need to be written out. + pub.catalog.save() + continue + + # Now link any catalog or cert files from the old root + # into the new root. + linktree(old_cat_root, pub.catalog_root) + linktree(old_cert_root, pub.cert_root) + + # Finally, create a directory for the publisher's + # manifests to live in. + misc.makedirs(os.path.join(pub.meta_root, "pkg")) + + # Next, link licenses and manifests of installed packages into + # new image dir. + for pfmri in self.gen_installed_pkgs(): + # Link licenses. 
+ mdir = self.get_manifest_dir(pfmri) + for entry in os.listdir(mdir): + if not entry.startswith("license."): + continue + src = os.path.join(mdir, entry) + if os.path.isdir(src): + # Ignore broken licenses. + continue + + # For conversion, ensure destination link uses + # encoded license name to match how new image + # format stores licenses. + dest = os.path.join( + tmp_root, + "license", + pfmri.get_dir_path(stemonly=True), + quote(entry, ""), + ) + misc.makedirs(os.path.dirname(dest)) try: - portable.copyfile(src, dest) + os.link(src, dest) except EnvironmentError as e: - raise apx._convert_error(e) + raise apx._convert_error(e) + + # Link manifest. + src = self.get_manifest_path(pfmri) + dest = os.path.join( + tmp_root, + "publisher", + pfmri.publisher, + "pkg", + pfmri.get_dir_path(), + ) + misc.makedirs(os.path.dirname(dest)) + try: + os.link(src, dest) + except EnvironmentError as e: + raise apx._convert_error(e) + + # Next, copy the old configuration into the new location using + # the new name. The configuration is copied instead of being + # linked so that any changes to configuration as a result of + # the upgrade won't be written into the old image directory. + src = os.path.join(self.imgdir, "disabled_auth") + if os.path.exists(src): + dest = os.path.join(tmp_root, "disabled_auth") + portable.copyfile(src, dest) + + src = self.cfg.target + dest = os.path.join(tmp_root, "pkg5.image") + try: + portable.copyfile(src, dest) + except EnvironmentError as e: + raise apx._convert_error(e) + + # Update the new configuration's version information and then + # write it out again. + newcfg = imageconfig.ImageConfig( + dest, + tmp_root, + version=3, + overrides={"image": {"version": self.CURRENT_VERSION}}, + ) + newcfg._version = 3 + newcfg.write() + + # Now reload configuration and write again to configuration data + # reflects updated version information. + newcfg.reset() + newcfg.write() + + # Finally, rename the old package metadata directory, then + # rename the new one into place, and then reinitialize. The + # old data will be dumped during initialization. + orig_root = self.imgdir + ".old" + try: + portable.rename(self.imgdir, orig_root) + portable.rename(tmp_root, self.imgdir) + + # /var/pkg/repo is renamed into place instead of being + # linked piece-by-piece for performance reasons. + # Crawling the entire tree structure of a repository is + # far slower than simply renaming the top level + # directory (since it often has thousands or millions + # of objects). + old_repo = os.path.join(orig_root, "repo") + if os.path.exists(old_repo): + new_repo = os.path.join(tmp_root, "repo") + portable.rename(old_repo, new_repo) + except EnvironmentError as e: + raise apx._convert_error(e) + self.find_root(self.root, exact_match=True, progtrack=progtrack) + + def create( + self, + pubs, + facets=EmptyDict, + is_zone=False, + progtrack=None, + props=EmptyDict, + refresh_allowed=True, + variants=EmptyDict, + ): + """Creates a new image with the given attributes if it does not + exist; should not be used with an existing image. + + 'is_zone' is a boolean indicating whether the image is a zone. + + 'pubs' is a list of Publisher objects to configure the image + with. + + 'refresh_allowed' is an optional boolean indicating that + network operations (such as publisher data retrieval) are + allowed. + + 'progtrack' is an optional ProgressTracker object. + + 'props' is an option dictionary mapping image property names to + values. 
+ + 'variants' is an optional dictionary of variant names and + values. + + 'facets' is an optional dictionary of facet names and values. + """ - # Update the new configuration's version information and then - # write it out again. - newcfg = imageconfig.ImageConfig(dest, tmp_root, - version=3, overrides={ "image": { - "version": self.CURRENT_VERSION } }) - newcfg._version = 3 - newcfg.write() - - # Now reload configuration and write again to configuration data - # reflects updated version information. - newcfg.reset() - newcfg.write() - - # Finally, rename the old package metadata directory, then - # rename the new one into place, and then reinitialize. The - # old data will be dumped during initialization. - orig_root = self.imgdir + ".old" + for p in pubs: + p.meta_root = self._get_publisher_meta_root(p.prefix) + p.transport = self.transport + + # Override any initial configuration information. + self.set_properties(props) + + # Start the operation. + self.history.log_operation_start("image-create") + + # Determine and add the default variants for the image. + if is_zone: + self.cfg.variants["variant.opensolaris.zone"] = "nonglobal" + else: + self.cfg.variants["variant.opensolaris.zone"] = "global" + + self.cfg.variants["variant.arch"] = variants.get( + "variant.arch", platform.processor() + ) + + # After setting up the default variants, add any overrides or + # additional variants or facets specified. + self.cfg.variants.update(variants) + self.cfg.facets.update(facets) + + # Now everything is ready for publisher configuration. + # Since multiple publishers are allowed, they are all + # added at once without any publisher data retrieval. + # A single retrieval is then performed afterwards, if + # allowed, to minimize the amount of work the client + # needs to perform. + for p in pubs: + self.add_publisher(p, refresh_allowed=False, progtrack=progtrack) + + if refresh_allowed: + self.refresh_publishers( + progtrack=progtrack, full_refresh=True, ignore_unreachable=False + ) + else: + # initialize empty catalogs on disk + self.__rebuild_image_catalogs(progtrack=progtrack) + + self.cfg.set_property( + "property", "publisher-search-order", [p.prefix for p in pubs] + ) + + # Ensure publisher search order is written. 
+ self.save_config() + + self.history.log_operation_end() + + @staticmethod + def __allow_liveroot(): + """Check if we're allowed to access the current live root + image.""" + + # if we're simulating a live root then allow access to it + if ( + DebugValues.get_value("simulate_live_root") + or "PKG_LIVE_ROOT" in os.environ + ): + return True + + # check if the user disabled access to the live root + if DebugValues.get_value("simulate_no_live_root"): + return False + if "PKG_NO_LIVE_ROOT" in os.environ: + return False + + # by default allow access to the live root + return True + + def is_liveroot(self): + return bool(self.root == misc.liveroot()) + + def is_zone(self): + return self.cfg.variants["variant.opensolaris.zone"] == "nonglobal" + + def get_arch(self): + return self.cfg.variants["variant.arch"] + + def has_boot_archive(self): + """Returns True if a boot_archive is present in this image""" + if self.__boot_archive is not None: + return self.__boot_archive + + for p in [ + "platform/i86pc/amd64/boot_archive", + "platform/i86pc/boot_archive", + "platform/sun4u/boot_archive", + "platform/sun4v/boot_archive", + ]: + if os.path.isfile(os.path.join(self.root, p)): + self.__boot_archive = True + break + else: + self.__boot_archive = False + return self.__boot_archive + + def get_ramdisk_filelist(self): + """return the filelist... add the filelist so we rebuild + boot archive if it changes... append trailing / to + directories that are really there""" + + p = "boot/solaris/filelist.ramdisk" + f = os.path.join(self.root, p) + + def addslash(path): + if os.path.isdir(os.path.join(self.root, path)): + return path + "/" + return path + + if not os.path.isfile(f): + return [] + + return [addslash(l.strip()) for l in open(f)] + [p] + + def get_cachedirs(self): + """Returns a list of tuples of the form (dir, readonly, pub, + layout) where 'dir' is the absolute path of the cache directory, + 'readonly' is a boolean indicating whether the cache can + be written to, 'pub' is the prefix of the publisher that + the cache directory should be used for, and 'layout' is a + FileManager object used to access file content in the cache. + If 'pub' is None, the cache directory is intended for all + publishers. If 'layout' is None, file content layout can + vary. + """ + + file_layout = None + if self.version >= 4: + # Assume cache directories are in V1 Layout if image + # format is v4+. + file_layout = fl.V1Layout() + + # Get all readonly cache directories. + cdirs = [ + (cdir, True, None, file_layout) for cdir in self.__read_cache_dirs + ] + + # Get global write cache directory. + if self.__write_cache_dir: + cdirs.append((self.__write_cache_dir, False, None, file_layout)) + + # For images newer than version 3, file data can be stored + # in the publisher's file root. + if self.version == self.CURRENT_VERSION: + for pub in self.gen_publishers(inc_disabled=True): + froot = os.path.join(pub.meta_root, "file") + readonly = False + if self.__write_cache_dir or self.__write_cache_root: + readonly = True + cdirs.append((froot, readonly, pub.prefix, file_layout)) + + if self.__write_cache_root: + # Cache is a tree structure like + # /var/pkg/publisher. + froot = os.path.join( + self.__write_cache_root, pub.prefix, "file" + ) + cdirs.append((froot, False, pub.prefix, file_layout)) + + return cdirs + + def get_root(self): + return self.root + + def get_last_modified(self, string=False): + """Return the UTC time of the image's last state change or + None if unknown. 
By default the time is returned via datetime + object. If 'string' is true and a time is available, then the + time is returned as a string (instead of as a datetime + object).""" + + # Always get last_modified time from known catalog. It's + # retrieved from the catalog itself since that is accurate + # down to the micrsecond (as opposed to the filesystem which + # has an OS-specific resolution). + rv = self.__get_catalog(self.IMG_CATALOG_KNOWN).last_modified + if rv is None or not string: + return rv + return rv.strftime("%Y-%m-%dT%H:%M:%S.%f") + + def gen_publishers(self, inc_disabled=False): + if not self.cfg: + raise apx.ImageCfgEmptyError(self.root) + + alt_pubs = {} + if self.__alt_pkg_pub_map: + alt_src_pubs = dict((p.prefix, p) for p in self.__alt_pubs) + + for pfx in self.__alt_known_cat.publishers(): + # Include alternate package source publishers + # in result, and temporarily enable any + # disabled publishers that already exist in + # the image configuration. try: - portable.rename(self.imgdir, orig_root) - portable.rename(tmp_root, self.imgdir) - - # /var/pkg/repo is renamed into place instead of being - # linked piece-by-piece for performance reasons. - # Crawling the entire tree structure of a repository is - # far slower than simply renaming the top level - # directory (since it often has thousands or millions - # of objects). - old_repo = os.path.join(orig_root, "repo") - if os.path.exists(old_repo): - new_repo = os.path.join(tmp_root, "repo") - portable.rename(old_repo, new_repo) - except EnvironmentError as e: + img_pub = self.cfg.publishers[pfx] + + if not img_pub.disabled: + # No override needed. + continue + new_pub = copy.copy(img_pub) + new_pub.disabled = False + + # Discard origins and mirrors to prevent + # their accidental use. + repo = new_pub.repository + repo.reset_origins() + repo.reset_mirrors() + except KeyError: + new_pub = alt_src_pubs[pfx] + + alt_pubs[pfx] = new_pub + + publishers = [ + alt_pubs.get(p.prefix, p) for p in self.cfg.publishers.values() + ] + publishers.extend((p for p in alt_pubs.values() if p not in publishers)) + + for pub in publishers: + # Prepare publishers for transport usage; this must be + # done each time so that information reflects current + # image state. This is done whether or not the + # publisher is returned so that in-memory state is + # always current. + pub.meta_root = self._get_publisher_meta_root(pub.prefix) + pub.transport = self.transport + if inc_disabled or not pub.disabled: + yield pub + + def get_publisher_ranks(self): + """Return dictionary of configured + enabled publishers and + unconfigured publishers which still have packages installed. + + Each entry contains a tuple of search order index starting at + 0, and a boolean indicating whether or not this publisher is + "sticky", and a boolean indicating whether or not the + publisher is enabled""" + + pubs = self.get_sorted_publishers(inc_disabled=False) + ret = dict( + [ + (pubs[i].prefix, (i, pubs[i].sticky, True)) + for i in range(0, len(pubs)) + ] + ) + + # Add any publishers for pkgs that are installed, + # but have been deleted. These publishers are implicitly + # not-sticky and disabled. 
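# Illustrative sketch, not part of the patch: a standalone
# approximation of the ranking scheme that get_publisher_ranks()
# describes above.  Configured publishers keep their search-order
# index and are reported as enabled; publishers that only remain as
# the source of installed packages are appended afterwards as
# non-sticky and disabled.  All names here are invented for the
# example.
def example_publisher_ranks(search_order, sticky, installed_only):
    ranks = {
        prefix: (i, sticky.get(prefix, True), True)
        for i, prefix in enumerate(search_order)
    }
    for prefix in sorted(installed_only):
        ranks.setdefault(prefix, (len(ranks), False, False))
    return ranks

# example_publisher_ranks(["omnios", "extra"], {"extra": False}, {"old"})
# -> {"omnios": (0, True, True), "extra": (1, False, True),
#     "old": (2, False, False)}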
+ for pub in self.get_installed_pubs(): + i = len(ret) + ret.setdefault(pub, (i, False, False)) + return ret + + def get_highest_ranked_publisher(self): + """Return the highest ranked publisher.""" + + pubs = self.cfg.get_property("property", "publisher-search-order") + if pubs: + return self.get_publisher(prefix=pubs[0]) + for p in self.gen_publishers(): + return p + for p in self.get_installed_pubs(): + return publisher.Publisher(p) + return None + + def check_cert_validity(self, pubs=EmptyI): + """Validate the certificates of the specified publishers. + + Raise an exception if any of the certificates has expired or + is close to expiring.""" + + if not pubs: + pubs = self.gen_publishers() + + errors = [] + for p in pubs: + r = p.repository + for uri in r.origins: + if uri.ssl_cert: + try: + misc.validate_ssl_cert( + uri.ssl_cert, prefix=p.prefix, uri=uri + ) + except apx.ExpiredCertificate as e: + errors.append(e) + + if uri.ssl_key: + try: + if not os.path.exists(uri.ssl_key): + raise apx.NoSuchKey( + uri.ssl_key, publisher=p, uri=uri + ) + except EnvironmentError as e: raise apx._convert_error(e) - self.find_root(self.root, exact_match=True, progtrack=progtrack) - - def create(self, pubs, facets=EmptyDict, is_zone=False, progtrack=None, - props=EmptyDict, refresh_allowed=True, variants=EmptyDict): - """Creates a new image with the given attributes if it does not - exist; should not be used with an existing image. - - 'is_zone' is a boolean indicating whether the image is a zone. - - 'pubs' is a list of Publisher objects to configure the image - with. - - 'refresh_allowed' is an optional boolean indicating that - network operations (such as publisher data retrieval) are - allowed. - - 'progtrack' is an optional ProgressTracker object. - 'props' is an option dictionary mapping image property names to - values. + if errors: + raise apx.ExpiredCertificates(errors) - 'variants' is an optional dictionary of variant names and - values. - - 'facets' is an optional dictionary of facet names and values. - """ - - for p in pubs: - p.meta_root = self._get_publisher_meta_root(p.prefix) - p.transport = self.transport - - # Override any initial configuration information. - self.set_properties(props) + def has_publisher(self, prefix=None, alias=None): + """Returns a boolean value indicating whether a publisher + exists in the image configuration that matches the given + prefix or alias.""" + for pub in self.gen_publishers(inc_disabled=True): + if prefix == pub.prefix or (alias and alias == pub.alias): + return True + return False - # Start the operation. - self.history.log_operation_start("image-create") + def remove_publisher(self, prefix=None, alias=None, progtrack=None): + """Removes the publisher with the matching identity from the + image.""" - # Determine and add the default variants for the image. - if is_zone: - self.cfg.variants["variant.opensolaris.zone"] = \ - "nonglobal" - else: - self.cfg.variants["variant.opensolaris.zone"] = \ - "global" - - self.cfg.variants["variant.arch"] = \ - variants.get("variant.arch", platform.processor()) - - # After setting up the default variants, add any overrides or - # additional variants or facets specified. - self.cfg.variants.update(variants) - self.cfg.facets.update(facets) - - # Now everything is ready for publisher configuration. - # Since multiple publishers are allowed, they are all - # added at once without any publisher data retrieval. 
- # A single retrieval is then performed afterwards, if - # allowed, to minimize the amount of work the client - # needs to perform. - for p in pubs: - self.add_publisher(p, refresh_allowed=False, - progtrack=progtrack) - - if refresh_allowed: - self.refresh_publishers(progtrack=progtrack, - full_refresh=True, ignore_unreachable=False) - else: - # initialize empty catalogs on disk - self.__rebuild_image_catalogs(progtrack=progtrack) + if not progtrack: + progtrack = progress.NullProgressTracker() - self.cfg.set_property("property", "publisher-search-order", - [p.prefix for p in pubs]) + with self.locked_op("remove-publisher"): + pub = self.get_publisher(prefix=prefix, alias=alias) - # Ensure publisher search order is written. - self.save_config() + self.cfg.remove_publisher(pub.prefix) + self.remove_publisher_metadata(pub, progtrack=progtrack) + self.save_config() - self.history.log_operation_end() + def get_publishers(self, inc_disabled=True): + """Return a dictionary of configured publishers. This doesn't + include unconfigured publishers which still have packages + installed.""" - @staticmethod - def __allow_liveroot(): - """Check if we're allowed to access the current live root - image.""" + return dict( + (p.prefix, p) + for p in self.gen_publishers(inc_disabled=inc_disabled) + ) - # if we're simulating a live root then allow access to it - if DebugValues.get_value("simulate_live_root") or \ - "PKG_LIVE_ROOT" in os.environ: - return True + def get_sorted_publishers(self, inc_disabled=True): + """Return a list of configured publishers sorted by rank. + This doesn't include unconfigured publishers which still have + packages installed.""" - # check if the user disabled access to the live root - if DebugValues.get_value("simulate_no_live_root"): - return False - if "PKG_NO_LIVE_ROOT" in os.environ: - return False + d = self.get_publishers(inc_disabled=inc_disabled) + names = self.cfg.get_property("property", "publisher-search-order") - # by default allow access to the live root + # + # If someone has been editing the config file we may have + # unranked publishers. Also, as publisher come and go via the + # sysrepo we can end up with configured but unranked + # publishers. In either case just sort unranked publishers + # alphabetically. + # + unranked = set(d) - set(names) + ret = [d[n] for n in names if n in d] + [d[n] for n in sorted(unranked)] + return ret + + def get_publisher(self, prefix=None, alias=None, origin=None): + for pub in self.gen_publishers(inc_disabled=True): + if prefix and prefix == pub.prefix: + return pub + elif alias and alias == pub.alias: + return pub + elif ( + origin and pub.repository and pub.repository.has_origin(origin) + ): + return pub + + if prefix is None and alias is None and origin is None: + raise apx.UnknownPublisher(None) + + raise apx.UnknownPublisher( + max(i for i in [prefix, alias, origin] if i is not None) + ) + + def pub_search_before(self, being_moved, staying_put): + """Moves publisher "being_moved" to before "staying_put" + in search order. + + The caller is responsible for locking the image.""" + + self.cfg.change_publisher_search_order( + being_moved, staying_put, after=False + ) + + def pub_search_after(self, being_moved, staying_put): + """Moves publisher "being_moved" to after "staying_put" + in search order. 
+ + The caller is responsible for locking the image.""" + + self.cfg.change_publisher_search_order( + being_moved, staying_put, after=True + ) + + def __apply_alt_pkg_sources(self, img_kcat): + pkg_pub_map = self.__alt_pkg_pub_map + if not pkg_pub_map or self.__alt_pkg_sources_loaded: + # No alternate sources to merge. + return + + # Temporarily merge the package metadata in the alternate + # known package catalog for packages not listed in the + # image's known catalog. + def merge_check(alt_kcat, pfmri, new_entry): + states = new_entry["metadata"]["states"] + if pkgdefs.PKG_STATE_INSTALLED in states: + # Not interesting; already installed. + return False, None + img_entry = img_kcat.get_entry(pfmri=pfmri) + if not img_entry is None: + # Already in image known catalog. + return False, None + return True, new_entry + + img_kcat.append(self.__alt_known_cat, cb=merge_check) + img_kcat.finalize() + + self.__alt_pkg_sources_loaded = True + self.transport.cfg.pkg_pub_map = self.__alt_pkg_pub_map + self.transport.cfg.alt_pubs = self.__alt_pubs + self.transport.cfg.reset_caches() + + def __cleanup_alt_pkg_certs(self): + """Private helper function to cleanup package certificate + information after use of temporary package data.""" + + if not self.__alt_pubs: + return + + # Cleanup publisher cert information; any certs not retrieved + # retrieved during temporary publisher use need to be expunged + # from the image configuration. + for pub in self.__alt_pubs: + try: + ipub = self.cfg.publishers[pub.prefix] + except KeyError: + # Nothing to do. + continue + + def set_alt_pkg_sources(self, alt_sources): + """Specifies an alternate source of package metadata to be + temporarily merged with image state so that it can be used + as part of packaging operations.""" + + if not alt_sources: + self.__init_catalogs() + self.__alt_pkg_pub_map = None + self.__alt_pubs = None + self.__alt_known_cat = None + self.__alt_pkg_sources_loaded = False + self.transport.cfg.pkg_pub_map = None + self.transport.cfg.alt_pubs = None + self.transport.cfg.reset_caches() + return + elif self.__alt_pkg_sources_loaded: + # Ensure existing alternate package source data + # is not part of temporary image state. + self.__init_catalogs() + + pkg_pub_map, alt_pubs, alt_kcat, ignored = alt_sources + self.__alt_pkg_pub_map = pkg_pub_map + self.__alt_pubs = alt_pubs + self.__alt_known_cat = alt_kcat + + def set_highest_ranked_publisher(self, prefix=None, alias=None, pub=None): + """Sets the preferred publisher for packaging operations. + + 'prefix' is an optional string value specifying the name of + a publisher; ignored if 'pub' is provided. + + 'alias' is an optional string value specifying the alias of + a publisher; ignored if 'pub' is provided. + + 'pub' is an optional Publisher object identifying the + publisher to set as the preferred publisher. + + One of the above parameters must be provided. + + The caller is responsible for locking the image.""" + + if not pub: + pub = self.get_publisher(prefix=prefix, alias=alias) + if not self.cfg.allowed_to_move(pub): + raise apx.ModifyingSyspubException( + _( + "Publisher '{0}' " + "is a system publisher and cannot be " + "moved." + ).format(pub) + ) + + pubs = self.get_sorted_publishers() + relative = None + for p in pubs: + # If we've gotten to the publisher we want to make + # highest ranked, then there's nothing to do because + # it's already as high as it can be. 
+ if p == pub: + return + if self.cfg.allowed_to_move(p): + relative = p + break + assert relative, ( + "Expected {0} to already be part of the " + + "search order:{1}".format(relative, ranks) + ) + self.cfg.change_publisher_search_order( + pub.prefix, relative.prefix, after=False + ) + + def set_property(self, prop_name, prop_value): + with self.locked_op("set-property"): + self.cfg.set_property("property", prop_name, prop_value) + self.save_config() + + def set_properties(self, properties): + properties = {"property": properties} + with self.locked_op("set-property"): + self.cfg.set_properties(properties) + self.save_config() + + def get_property(self, prop_name): + return self.cfg.get_property("property", prop_name) + + def has_property(self, prop_name): + try: + self.cfg.get_property("property", prop_name) + return True + except cfg.ConfigError: + return False + + def delete_property(self, prop_name): + with self.locked_op("unset-property"): + self.cfg.remove_property("property", prop_name) + self.save_config() + + def add_property_value(self, prop_name, prop_value): + with self.locked_op("add-property-value"): + self.cfg.add_property_value("property", prop_name, prop_value) + self.save_config() + + def remove_property_value(self, prop_name, prop_value): + with self.locked_op("remove-property-value"): + self.cfg.remove_property_value("property", prop_name, prop_value) + self.save_config() + + def destroy(self): + """Destroys the image; image object should not be used + afterwards.""" + + if not self.imgdir or not os.path.exists(self.imgdir): + return + + if os.path.abspath(self.imgdir) == "/": + # Paranoia. + return + + try: + shutil.rmtree(self.imgdir) + except EnvironmentError as e: + raise apx._convert_error(e) + + def properties(self): + if not self.cfg: + raise apx.ImageCfgEmptyError(self.root) + return list(self.cfg.get_index()["property"].keys()) + + def add_publisher( + self, + pub, + refresh_allowed=True, + progtrack=None, + approved_cas=EmptyI, + revoked_cas=EmptyI, + search_after=None, + search_before=None, + search_first=None, + unset_cas=EmptyI, + ): + """Adds the provided publisher object to the image + configuration. + + 'refresh_allowed' is an optional, boolean value indicating + whether the publisher's metadata should be retrieved when adding + it to the image's configuration. + + 'progtrack' is an optional ProgressTracker object.""" + + with self.locked_op("add-publisher"): + return self.__add_publisher( + pub, + refresh_allowed=refresh_allowed, + progtrack=progtrack, + approved_cas=EmptyI, + revoked_cas=EmptyI, + search_after=search_after, + search_before=search_before, + search_first=search_first, + unset_cas=EmptyI, + ) + + def __update_publisher_catalogs( + self, pub, progtrack=None, refresh_allowed=True + ): + # Ensure that if the publisher's meta directory already + # exists for some reason that the data within is not + # used. + self.remove_publisher_metadata(pub, progtrack=progtrack, rebuild=False) + + repo = pub.repository + if refresh_allowed and repo.origins: + try: + # First, verify that the publisher has a + # valid pkg(7) repository. + self.transport.valid_publisher_test(pub) + pub.validate_config() + self.refresh_publishers( + pubs=[pub], progtrack=progtrack, ignore_unreachable=False + ) + except Exception as e: + # Remove the newly added publisher since + # it is invalid or the retrieval failed. + if not pub.sys_pub: + self.cfg.remove_publisher(pub.prefix) + raise + except: + # Remove the newly added publisher since + # the retrieval failed. 
+ if not pub.sys_pub: + self.cfg.remove_publisher(pub.prefix) + raise + + def __add_publisher( + self, + pub, + refresh_allowed=True, + progtrack=None, + approved_cas=EmptyI, + revoked_cas=EmptyI, + search_after=None, + search_before=None, + search_first=None, + unset_cas=EmptyI, + ): + """Private version of add_publisher(); caller is responsible + for locking.""" + + assert ( + (not search_after and not search_before) + or (not search_after and not search_first) + or (not search_before and not search_first) + ) + + if self.version < self.CURRENT_VERSION: + raise apx.ImageFormatUpdateNeeded(self.root) + + for p in self.cfg.publishers.values(): + if ( + pub.prefix == p.prefix + or pub.prefix == p.alias + or pub.alias + and (pub.alias == p.alias or pub.alias == p.prefix) + ): + raise apx.DuplicatePublisher(pub) + + if not progtrack: + progtrack = progress.NullProgressTracker() + + # Must assign this first before performing operations. + pub.meta_root = self._get_publisher_meta_root(pub.prefix) + pub.transport = self.transport + + # Before continuing, validate SSL information. + try: + self.check_cert_validity(pubs=[pub]) + except apx.ExpiringCertificate as e: + logger.error(str(e)) + + self.cfg.publishers[pub.prefix] = pub + + self.__update_publisher_catalogs( + pub, progtrack=progtrack, refresh_allowed=refresh_allowed + ) + + for ca in approved_cas: + try: + ca = os.path.abspath(ca) + fh = open(ca, "r") + s = fh.read() + fh.close() + except EnvironmentError as e: + if e.errno == errno.ENOENT: + raise apx.MissingFileArgumentException(ca) + raise apx._convert_error(e) + pub.approve_ca_cert(s, manual=True) + + for hsh in revoked_cas: + pub.revoke_ca_cert(hsh) + + for hsh in unset_cas: + pub.unset_ca_cert(hsh) + + if search_first: + self.set_highest_ranked_publisher(prefix=pub.prefix) + elif search_before: + self.pub_search_before(pub.prefix, search_before) + elif search_after: + self.pub_search_after(pub.prefix, search_after) + + # Only after success should the configuration be saved. + self.save_config() + + def __process_verify( + self, + act, + path, + path_only, + fmri, + excludes, + vardrate_excludes, + progresstracker, + verifypaths=None, + overlaypaths=None, + **kwargs, + ): + errors = [] + warnings = [] + info = [] + if act.include_this(excludes, publisher=fmri.publisher): + if not path_only: + errors, warnings, info = act.verify(self, pfmri=fmri, **kwargs) + elif path in verifypaths or path in overlaypaths: + if path in verifypaths: + progresstracker.plan_add_progress( + progresstracker.PLAN_PKG_VERIFY + ) + + errors, warnings, info = act.verify(self, pfmri=fmri, **kwargs) + # It's safe to immediately discard this + # match as only one action can deliver a + # path with overlay=allow and only one with + # overlay=true. + overlaypaths.discard(path) + if act.attrs.get("overlay") == "allow": + overlaypaths.add(path) + verifypaths.discard(path) + elif ( + act.include_this(vardrate_excludes, publisher=fmri.publisher) + and not act.refcountable + ): + # Verify that file that is faceted out does not + # exist. Exclude actions which may be delivered + # from multiple packages. + if path is not None and os.path.exists( + os.path.join(self.root, path) + ): + errors.append(_("File should not exist")) + else: + # Action that is not applicable to image variant + # or has been dehydrated. 
+ return None, None, None, True + return errors, warnings, info, False + + def verify( + self, + fmri, + progresstracker, + verifypaths=None, + overlaypaths=None, + single_act=None, + **kwargs, + ): + """Generator that returns a tuple of the form (action, errors, + warnings, info) if there are any error, warning, or other + messages about an action contained within the specified + package. Where the returned messages are lists of strings + indicating fatal problems, potential issues (that can be + ignored), or extra information to be displayed respectively. + + 'fmri' is the fmri of the package to verify. + + 'progresstracker' is a ProgressTracker object. + + 'verifypaths' is the set of paths to verify. + + 'overlaypaths' is the set of overlaying path to verify. + + 'single_act' is the only action of the specified fmri to + verify. + + 'kwargs' is a dict of additional keyword arguments to be passed + to each action verification routine.""" + + path_only = bool(verifypaths or overlaypaths) + # pkg verify only looks at actions that have not been dehydrated. + excludes = self.list_excludes() + vardrate_excludes = [self.cfg.variants.allow_action] + dehydrate = self.cfg.get_property("property", "dehydrated") + if dehydrate: + func = self.get_dehydrated_exclude_func(dehydrate) + excludes.append(func) + vardrate_excludes.append(func) + + # If single_act is set, only that action will be processed. + if single_act: + overlay = None + if single_act.attrs.get("overlay") == "allow": + overlay = "overlaid" + elif single_act.attrs.get("overlay") == "true": + overlay = "overlaying" + progresstracker.plan_add_progress( + progresstracker.PLAN_PKG_VERIFY, nitems=0 + ) + path = single_act.attrs.get("path") + errors, warnings, info, ignore = self.__process_verify( + single_act, + path, + path_only, + fmri, + excludes, + vardrate_excludes, + progresstracker, + verifypaths=verifypaths, + overlaypaths=overlaypaths, + **kwargs, + ) + if (errors or warnings or info) and not ignore: + yield single_act, errors, warnings, info, overlay + return + + try: + pub = self.get_publisher(prefix=fmri.publisher) + except apx.UnknownPublisher: + # Since user removed publisher, assume this is the same + # as if they had set signature-policy ignore for the + # publisher. + sig_pol = None + else: + sig_pol = self.signature_policy.combine(pub.signature_policy) + + if not path_only: + progresstracker.plan_add_progress(progresstracker.PLAN_PKG_VERIFY) + + manf = self.get_manifest(fmri, ignore_excludes=True) + sigs = list( + manf.gen_actions_by_type("signature", excludes=self.list_excludes()) + ) + if sig_pol and (sigs or sig_pol.name != "ignore"): + # Only perform signature verification logic if there are + # signatures or if signature-policy is not 'ignore'. + try: + # Signature verification must be done using all + # the actions from the manifest, not just the + # ones for this image's variants. + sig_pol.process_signatures( + sigs, + manf.gen_actions(), + pub, + self.trust_anchors, + self.cfg.get_policy("check-certificate-revocation"), + ) + except apx.SigningException as e: + e.pfmri = fmri + yield e.sig, [e], [], [], None + except apx.InvalidResourceLocation as e: + yield None, [e], [], [], None + + def mediation_allowed(act): + """Helper function to determine if the mediation + delivered by a link is allowed. If it is, then + the link should be verified. (Yes, this does mean + that the non-existence of links is not verified.) 
+ """ + + mediator = act.attrs.get("mediator") + if not mediator or mediator not in self.cfg.mediators: + # Link isn't mediated or mediation is unknown. return True - def is_liveroot(self): - return bool(self.root == misc.liveroot()) - - def is_zone(self): - return self.cfg.variants["variant.opensolaris.zone"] == \ - "nonglobal" - - def get_arch(self): - return self.cfg.variants["variant.arch"] - - def has_boot_archive(self): - """Returns True if a boot_archive is present in this image""" - if self.__boot_archive is not None: - return self.__boot_archive + cfg_med_version = self.cfg.mediators[mediator].get("version") + cfg_med_impl = self.cfg.mediators[mediator].get("implementation") + + med_version = act.attrs.get("mediator-version") + if med_version: + med_version = pkg.version.Version(med_version) + med_impl = act.attrs.get("mediator-implementation") + + return med_version == cfg_med_version and med.mediator_impl_matches( + med_impl, cfg_med_impl + ) + + for act in manf.gen_actions(): + path = act.attrs.get("path") + # Defer verification on actions with 'overlay' + # attribute = 'allow'. + if not path_only: + if act.attrs.get("overlay") == "true": + yield act, [], [], [], "overlaying" + continue + elif act.attrs.get("overlay"): + yield act, [], [], [], "overlaid" + continue + + progresstracker.plan_add_progress( + progresstracker.PLAN_PKG_VERIFY, nitems=0 + ) + + if ( + act.name == "link" or act.name == "hardlink" + ) and not mediation_allowed(act): + # Link doesn't match configured + # mediation, so shouldn't be verified. + continue + + errors, warnings, info, ignore = self.__process_verify( + act, + path, + path_only, + fmri, + excludes, + vardrate_excludes, + progresstracker, + verifypaths=verifypaths, + overlaypaths=overlaypaths, + **kwargs, + ) + if (errors or warnings or info) and not ignore: + yield act, errors, warnings, info, None + + def image_config_update(self, new_variants, new_facets, new_mediators): + """update variants in image config""" + + if new_variants is not None: + self.cfg.variants.update(new_variants) + if new_facets is not None: + self.cfg.facets = new_facets + if new_mediators is not None: + self.cfg.mediators = new_mediators + self.save_config() + + def __verify_manifest(self, fmri, mfstpath, alt_pub=None): + """Verify a manifest. The caller must supply the FMRI + for the package in 'fmri', as well as the path to the + manifest file that will be verified.""" + + try: + return self.transport._verify_manifest( + fmri, mfstpath=mfstpath, pub=alt_pub + ) + except InvalidContentException: + return False + + def has_manifest(self, pfmri, alt_pub=None): + """Check to see if the manifest for pfmri is present on disk and + has the correct hash.""" + + pth = self.get_manifest_path(pfmri) + on_disk = os.path.exists(pth) + + if ( + not on_disk + or self.is_pkg_installed(pfmri) + or self.__verify_manifest(fmri=pfmri, mfstpath=pth, alt_pub=alt_pub) + ): + return on_disk + return False + + def get_license_dir(self, pfmri): + """Return path to package license directory.""" + if self.version == self.CURRENT_VERSION: + # Newer image format stores license files per-stem, + # instead of per-stem and version, so that transitions + # between package versions don't require redelivery + # of license files. + return os.path.join( + self.imgdir, "license", pfmri.get_dir_path(stemonly=True) + ) + # Older image formats store license files in the manifest cache + # directory. 
+ return self.get_manifest_dir(pfmri) + + def __get_installed_pkg_publisher(self, pfmri): + """Returns the publisher for the FMRI of an installed package + or None if the package is not installed. + """ + for f in self.gen_installed_pkgs(): + if f.pkg_name == pfmri.pkg_name: + return f.publisher + return None + + def get_manifest_dir(self, pfmri): + """Return path to on-disk manifest cache directory.""" + if not pfmri.publisher: + # Needed for consumers such as search that don't provide + # publisher information. + pfmri = pfmri.copy() + pfmri.publisher = self.__get_installed_pkg_publisher(pfmri) + assert pfmri.publisher + if self.version == self.CURRENT_VERSION: + root = self._get_publisher_cache_root(pfmri.publisher) + else: + root = self.imgdir + return os.path.join(root, "pkg", pfmri.get_dir_path()) + + def get_manifest_path(self, pfmri): + """Return path to on-disk manifest file.""" + if not pfmri.publisher: + # Needed for consumers such as search that don't provide + # publisher information. + pfmri = pfmri.copy() + pfmri.publisher = self.__get_installed_pkg_publisher(pfmri) + assert pfmri.publisher + if self.version == self.CURRENT_VERSION: + root = os.path.join(self._get_publisher_meta_root(pfmri.publisher)) + return os.path.join(root, "pkg", pfmri.get_dir_path()) + return os.path.join(self.get_manifest_dir(pfmri), "manifest") + + def __get_manifest(self, fmri, excludes=EmptyI, intent=None, alt_pub=None): + """Find on-disk manifest and create in-memory Manifest + object.... grab from server if needed""" + + try: + if not self.has_manifest(fmri, alt_pub=alt_pub): + raise KeyError + ret = manifest.FactoredManifest( + fmri, + self.get_manifest_dir(fmri), + excludes=excludes, + pathname=self.get_manifest_path(fmri), + ) + + # if we have a intent string, let depot + # know for what we're using the cached manifest + if intent: + alt_repo = None + if alt_pub: + alt_repo = alt_pub.repository + try: + self.transport.touch_manifest( + fmri, intent, alt_repo=alt_repo + ) + except (apx.UnknownPublisher, apx.TransportError): + # It's not fatal if we can't find + # or reach the publisher. + pass + except KeyError: + ret = self.transport.get_manifest( + fmri, excludes, intent, pub=alt_pub + ) + return ret + + def get_manifest( + self, fmri, ignore_excludes=False, intent=None, alt_pub=None + ): + """return manifest; uses cached version if available. + ignore_excludes controls whether manifest contains actions + for all variants + + If 'ignore_excludes' is set to True, then all actions in the + manifest are included, regardless of variant or facet tags. If + set to False, then the variants and facets currently set in the + image will be applied, potentially filtering out some of the + actions.""" + + # Normally elide other arch variants, facets + + if ignore_excludes: + excludes = EmptyI + else: + excludes = [ + self.cfg.variants.allow_action, + self.cfg.facets.allow_action, + ] + + try: + m = self.__get_manifest( + fmri, excludes=excludes, intent=intent, alt_pub=alt_pub + ) + except apx.ActionExecutionError as e: + raise + except pkg.actions.ActionError as e: + raise apx.InvalidPackageErrors([e]) + + return m + + def __catalog_save(self, cats, pfmris, progtrack): + # Temporarily redirect the catalogs to a different location, + # so that if the save is interrupted, the image won't be left + # with invalid state, and then save them. 
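# Illustrative sketch, not part of the patch: the build-aside-and-swap
# pattern that __catalog_save() below (and the image-format upgrade
# code earlier in this file) relies on, reduced to a standalone helper
# using only the standard library.  'writer' is an invented callback
# that populates the new tree.
import os
import shutil
import tempfile

def replace_state_dir(state_dir, writer):
    parent = os.path.dirname(os.path.abspath(state_dir))
    tmp_dir = tempfile.mkdtemp(dir=parent, prefix=".state-new-")
    old_dir = state_dir + ".old"
    try:
        writer(tmp_dir)                 # build the replacement tree
        os.rename(state_dir, old_dir)   # keep the old tree until the swap works
        os.rename(tmp_dir, state_dir)   # new tree becomes live in one step
    except OSError:
        shutil.rmtree(tmp_dir, ignore_errors=True)
        raise
    shutil.rmtree(old_dir, ignore_errors=True)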
+ tmp_state_root = self.temporary_dir() + + try: + for cat, name in cats: + cpath = os.path.join(tmp_state_root, name) + + # Must copy the old catalog data to the new + # destination as only changed files will be + # written. + progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) + misc.copytree(cat.meta_root, cpath) + progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) + cat.meta_root = cpath + cat.finalize(pfmris=pfmris) + progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) + cat.save() + progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) + + del cat, name + self.__init_catalogs() + progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) + + # copy any other state files from current state + # dir into new state dir. + for p in os.listdir(self._statedir): + progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) + fp = os.path.join(self._statedir, p) + if os.path.isfile(fp): + portable.copyfile(fp, os.path.join(tmp_state_root, p)) + + # Next, preserve the old installed state dir, rename the + # new one into place, and then remove the old one. + orig_state_root = self.salvage(self._statedir, full_path=True) + portable.rename(tmp_state_root, self._statedir) + + progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) + shutil.rmtree(orig_state_root, True) + + progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) + except EnvironmentError as e: + # shutil.Error can contains a tuple of lists of errors. + # Some of the error entries may be a tuple others will + # be a string due to poor error handling in shutil. + if isinstance(e, shutil.Error) and type(e.args[0]) == list: + msg = "" + for elist in e.args: + for entry in elist: + if type(entry) == tuple: + msg += "{0}\n".format(entry[-1]) + else: + msg += "{0}\n".format(entry) + raise apx.UnknownErrors(msg) + raise apx._convert_error(e) + finally: + # Regardless of success, the following must happen. + self.__init_catalogs() + if os.path.exists(tmp_state_root): + shutil.rmtree(tmp_state_root, True) + + def update_pkg_installed_state(self, pkg_pairs, progtrack, origin): + """Sets the recorded installed state of each package pair in + 'pkg_pairs'. 'pkg_pair' should be an iterable of tuples of + the format (added, removed) where 'removed' is the FMRI of the + package that was uninstalled, and 'added' is the package + installed for the operation. These pairs are representative of + the destination and origin package for each part of the + operation.""" + + if self.version < self.CURRENT_VERSION: + raise apx.ImageFormatUpdateNeeded(self.root) + + kcat = self.get_catalog(self.IMG_CATALOG_KNOWN) + icat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + + added = set() + removed = set() + manual = set() + updated = {} + for add_pkg, rem_pkg in pkg_pairs: + if add_pkg == rem_pkg: + continue + if add_pkg: + added.add(add_pkg) + if add_pkg in origin: + manual.add(add_pkg) + if rem_pkg: + removed.add(rem_pkg) + if add_pkg and rem_pkg: + updated[add_pkg] = dict( + kcat.get_entry(rem_pkg).get("metadata", {}) + ) - for p in ["platform/i86pc/amd64/boot_archive", - "platform/i86pc/boot_archive", - "platform/sun4u/boot_archive", - "platform/sun4v/boot_archive"]: - if os.path.isfile(os.path.join(self.root, p)): - self.__boot_archive = True - break + combo = added.union(removed) + + # If PKG_AUTOINSTALL is set in the environment, don't mark + # the installed packages as 'manually installed'. This is + # used by Kayak when creating the initial ZFS send stream. 
+ if "PKG_AUTOINSTALL" in os.environ: + manual = set() + + progtrack.job_start(progtrack.JOB_STATE_DB) + # 'Updating package state database' + for pfmri in combo: + progtrack.job_add_progress(progtrack.JOB_STATE_DB) + entry = kcat.get_entry(pfmri) + mdata = entry.get("metadata", {}) + states = set(mdata.get("states", set())) + if pfmri in removed: + icat.remove_package(pfmri) + states.discard(pkgdefs.PKG_STATE_INSTALLED) + states.discard(pkgdefs.PKG_STATE_MANUAL) + mdata.pop("last-install", None) + mdata.pop("last-update", None) + + if pfmri in added: + states.add(pkgdefs.PKG_STATE_INSTALLED) + if pfmri in manual: + states.add(pkgdefs.PKG_STATE_MANUAL) + cur_time = pkg.catalog.now_to_basic_ts() + if pfmri in updated: + last_install = updated[pfmri].get("last-install") + if last_install: + mdata["last-install"] = last_install + mdata["last-update"] = cur_time + else: + mdata["last-install"] = cur_time + ostates = set(updated[pfmri].get("states", set())) + if pkgdefs.PKG_STATE_MANUAL in ostates: + states.add(pkgdefs.PKG_STATE_MANUAL) else: - self.__boot_archive = False - return self.__boot_archive - - def get_ramdisk_filelist(self): - """return the filelist... add the filelist so we rebuild - boot archive if it changes... append trailing / to - directories that are really there""" - - p = "boot/solaris/filelist.ramdisk" - f = os.path.join(self.root, p) - - def addslash(path): - if os.path.isdir(os.path.join(self.root, path)): - return path + "/" - return path - - if not os.path.isfile(f): - return [] - - return [ addslash(l.strip()) for l in open(f) ] + [p] - - def get_cachedirs(self): - """Returns a list of tuples of the form (dir, readonly, pub, - layout) where 'dir' is the absolute path of the cache directory, - 'readonly' is a boolean indicating whether the cache can - be written to, 'pub' is the prefix of the publisher that - the cache directory should be used for, and 'layout' is a - FileManager object used to access file content in the cache. - If 'pub' is None, the cache directory is intended for all - publishers. If 'layout' is None, file content layout can - vary. - """ - - file_layout = None - if self.version >= 4: - # Assume cache directories are in V1 Layout if image - # format is v4+. - file_layout = fl.V1Layout() + mdata["last-install"] = cur_time + if pkgdefs.PKG_STATE_ALT_SOURCE in states: + states.discard(pkgdefs.PKG_STATE_UPGRADABLE) + states.discard(pkgdefs.PKG_STATE_ALT_SOURCE) + states.discard(pkgdefs.PKG_STATE_KNOWN) + elif pkgdefs.PKG_STATE_KNOWN not in states: + # This entry is no longer available and has no + # meaningful state information, so should be + # discarded. + kcat.remove_package(pfmri) + progtrack.job_add_progress(progtrack.JOB_STATE_DB) + continue + + if ( + pkgdefs.PKG_STATE_INSTALLED in states + and pkgdefs.PKG_STATE_UNINSTALLED in states + ) or ( + pkgdefs.PKG_STATE_KNOWN in states + and pkgdefs.PKG_STATE_UNKNOWN in states + ): + raise apx.ImagePkgStateError(pfmri, states) + + # Catalog format only supports lists. + mdata["states"] = list(states) + + # Now record the package state. + kcat.update_entry(mdata, pfmri=pfmri) + + # If the package is being marked as installed, + # then it shouldn't already exist in the + # installed catalog and should be added. + if pfmri in added: + icat.append(kcat, pfmri=pfmri) + + entry = mdata = states = None + progtrack.job_add_progress(progtrack.JOB_STATE_DB) + progtrack.job_done(progtrack.JOB_STATE_DB) + + # Discard entries for alternate source packages that weren't + # installed as part of the operation. 
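# Illustrative sketch, not part of the patch: the last-install /
# last-update bookkeeping rule applied in the loop above, as a
# standalone helper.  A package replacing an older version keeps the
# original "last-install" and gains a fresh "last-update"; a package
# installed for the first time only gets "last-install".  All names
# are invented for the example.
import datetime

def stamp_install(mdata, previous=None, now=None):
    now = now or datetime.datetime.utcnow().isoformat()
    last_install = (previous or {}).get("last-install")
    if last_install:
        mdata["last-install"] = last_install
        mdata["last-update"] = now
    else:
        mdata["last-install"] = now
    return mdata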
+ if self.__alt_pkg_pub_map: + for pfmri in self.__alt_known_cat.fmris(): + if pfmri in added: + # Nothing to do. + continue + + entry = kcat.get_entry(pfmri) + if not entry: + # The only reason that the entry should + # not exist in the 'known' part is + # because it was removed during the + # operation. + assert pfmri in removed + continue + + states = entry.get("metadata", {}).get("states", EmptyI) + if pkgdefs.PKG_STATE_ALT_SOURCE in states: + kcat.remove_package(pfmri) + + # Now add the publishers of packages that were installed + # from temporary sources that did not previously exist + # to the image's configuration. (But without any + # origins, sticky, and enabled.) + cfgpubs = set(self.cfg.publishers.keys()) + instpubs = set(f.publisher for f in added) + altpubs = self.__alt_known_cat.publishers() + + # List of publishers that need to be added is the + # intersection of installed and alternate minus + # the already configured. + newpubs = (instpubs & altpubs) - cfgpubs + # Sort the set to get a deterministic output. + for pfx in sorted(newpubs): + npub = publisher.Publisher( + pfx, repository=publisher.Repository() + ) + self.__add_publisher(npub, refresh_allowed=False) + + # Ensure image configuration reflects new information. + self.__cleanup_alt_pkg_certs() + self.save_config() + + # Remove manifests of packages that were removed from the + # system. Some packages may have only had facets or + # variants changed, so don't remove those. + + # 'Updating package cache' + progtrack.job_start(progtrack.JOB_PKG_CACHE, goal=len(removed)) + for pfmri in removed: + mcdir = self.get_manifest_dir(pfmri) + manifest.FactoredManifest.clear_cache(mcdir) + + # Remove package cache directory if possible; we don't + # care if it fails. + try: + os.rmdir(os.path.dirname(mcdir)) + except: + pass + + mpath = self.get_manifest_path(pfmri) + try: + portable.remove(mpath) + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise apx._convert_error(e) + + # Remove package manifest directory if possible; we + # don't care if it fails. + try: + os.rmdir(os.path.dirname(mpath)) + except: + pass + progtrack.job_add_progress(progtrack.JOB_PKG_CACHE) + progtrack.job_done(progtrack.JOB_PKG_CACHE) + + progtrack.job_start(progtrack.JOB_IMAGE_STATE) + + self.__catalog_save( + [ + (kcat, self.IMG_CATALOG_KNOWN), + (icat, self.IMG_CATALOG_INSTALLED), + ], + added, + progtrack, + ) + + progtrack.job_done(progtrack.JOB_IMAGE_STATE) + + def get_catalog(self, name): + """Returns the requested image catalog. + + 'name' must be one of the following image constants: + IMG_CATALOG_KNOWN + The known catalog contains all of packages that are + installed or available from a publisher's repository. + + IMG_CATALOG_INSTALLED + The installed catalog is a subset of the 'known' + catalog that only contains installed packages.""" + + if not self.imgdir: + raise RuntimeError("self.imgdir must be set") + + cat = self.__catalogs.get(name) + if not cat: + cat = self.__get_catalog(name) + self.__catalogs[name] = cat + + if name == self.IMG_CATALOG_KNOWN: + # Apply alternate package source data every time that + # the known catalog is requested. + self.__apply_alt_pkg_sources(cat) + + return cat + + def _manifest_cb(self, cat, f): + # Only allow lazy-load for packages from non-v1 sources. + # Assume entries for other sources have all data + # required in catalog. 
This prevents manifest retrieval + # for packages that don't have any related action data + # in the catalog because they don't have any related + # action data in their manifest. + entry = cat.get_entry(f) + states = entry["metadata"]["states"] + if pkgdefs.PKG_STATE_V1 not in states: + return self.get_manifest(f, ignore_excludes=True) + return + + def __get_catalog(self, name): + """Private method to retrieve catalog; this bypasses the + normal automatic caching (unless the image hasn't been + upgraded yet).""" + + if self.__upgraded and self.version < 3: + # Assume the catalog is already cached in this case + # and can't be reloaded from disk as it doesn't exist + # on disk yet. + return self.__catalogs[name] + + croot = os.path.join(self._statedir, name) + try: + os.makedirs(croot) + except EnvironmentError as e: + if e.errno in (errno.EACCES, errno.EROFS): + # Allow operations to work for + # unprivileged users. + croot = None + elif e.errno != errno.EEXIST: + raise + + # batch_mode is set to True here as any operations that modify + # the catalogs (add or remove entries) are only done during an + # image upgrade or metadata refresh. In both cases, the catalog + # is resorted and finalized so this is always safe to use. + cat = pkg.catalog.Catalog( + batch_mode=True, + manifest_cb=self._manifest_cb, + meta_root=croot, + sign=False, + ) + return cat + + def __remove_catalogs(self): + """Removes all image catalogs and their directories.""" + + self.__init_catalogs() + for name in (self.IMG_CATALOG_KNOWN, self.IMG_CATALOG_INSTALLED): + shutil.rmtree(os.path.join(self._statedir, name)) + + def get_version_installed(self, pfmri): + """Returns an fmri of the installed package matching the + package stem of the given fmri or None if no match is found.""" + + cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + for ver, fmris in cat.fmris_by_version(pfmri.pkg_name): + return fmris[0] + return None + + def get_pkg_repo(self, pfmri): + """Returns the repository object containing the origins that + should be used to retrieve the specified package or None if + it can be retrieved from all sources or is not a known package. + """ - # Get all readonly cache directories. - cdirs = [ - (cdir, True, None, file_layout) - for cdir in self.__read_cache_dirs - ] + assert pfmri.publisher + cat = self.get_catalog(self.IMG_CATALOG_KNOWN) + entry = cat.get_entry(pfmri) + if entry is None: + # Package not known. + return + + try: + slist = entry["metadata"]["sources"] + except KeyError: + # Can be retrieved from any source. + return + else: + if not slist: + # Can be retrieved from any source. + return - # Get global write cache directory. - if self.__write_cache_dir: - cdirs.append((self.__write_cache_dir, False, None, - file_layout)) - - # For images newer than version 3, file data can be stored - # in the publisher's file root. - if self.version == self.CURRENT_VERSION: - for pub in self.gen_publishers(inc_disabled=True): - froot = os.path.join(pub.meta_root, "file") - readonly = False - if self.__write_cache_dir or \ - self.__write_cache_root: - readonly = True - cdirs.append((froot, readonly, pub.prefix, - file_layout)) - - if self.__write_cache_root: - # Cache is a tree structure like - # /var/pkg/publisher. 
- froot = os.path.join( - self.__write_cache_root, pub.prefix, - "file") - cdirs.append((froot, False, pub.prefix, - file_layout)) - - return cdirs - - def get_root(self): - return self.root - - def get_last_modified(self, string=False): - """Return the UTC time of the image's last state change or - None if unknown. By default the time is returned via datetime - object. If 'string' is true and a time is available, then the - time is returned as a string (instead of as a datetime - object).""" - - # Always get last_modified time from known catalog. It's - # retrieved from the catalog itself since that is accurate - # down to the micrsecond (as opposed to the filesystem which - # has an OS-specific resolution). - rv = self.__get_catalog(self.IMG_CATALOG_KNOWN).last_modified - if rv is None or not string: - return rv - return rv.strftime("%Y-%m-%dT%H:%M:%S.%f") - - def gen_publishers(self, inc_disabled=False): - if not self.cfg: - raise apx.ImageCfgEmptyError(self.root) - - alt_pubs = {} - if self.__alt_pkg_pub_map: - alt_src_pubs = dict( - (p.prefix, p) - for p in self.__alt_pubs + pub = self.get_publisher(prefix=pfmri.publisher) + repo = copy.copy(pub.repository) + norigins = [o for o in repo.origins if o.uri in slist] + + if not norigins: + # Known sources don't match configured; return so that + # caller can fallback to default behaviour. + return + + repo.origins = norigins + return repo + + def get_pkg_state(self, pfmri): + """Returns the list of states a package is in for this image.""" + + cat = self.get_catalog(self.IMG_CATALOG_KNOWN) + entry = cat.get_entry(pfmri) + if entry is None: + return [] + return entry["metadata"]["states"] + + def is_pkg_installed(self, pfmri): + """Returns a boolean value indicating whether the specified + package is installed.""" + + # Avoid loading the installed catalog if the known catalog + # is already loaded. This is safe since the installed + # catalog is a subset of the known, and a specific entry + # is being retrieved. + if not self.__catalog_loaded(self.IMG_CATALOG_KNOWN): + cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + else: + cat = self.get_catalog(self.IMG_CATALOG_KNOWN) + + entry = cat.get_entry(pfmri) + if entry is None: + return False + states = entry["metadata"]["states"] + return pkgdefs.PKG_STATE_INSTALLED in states + + def list_excludes(self, new_variants=None, new_facets=None): + """Generate a list of callables that each return True if an + action is to be included in the image using the currently + defined variants & facets for the image, or an updated set if + new_variants or new_facets are specified.""" + + if new_variants: + new_vars = self.cfg.variants.copy() + new_vars.update(new_variants) + var_call = new_vars.allow_action + else: + var_call = self.cfg.variants.allow_action + if new_facets is not None: + fac_call = new_facets.allow_action + else: + fac_call = self.cfg.facets.allow_action + + return [var_call, fac_call] + + def get_variants(self): + """return a copy of the current image variants""" + return self.cfg.variants.copy() + + def get_facets(self): + """Return a copy of the current image facets""" + return self.cfg.facets.copy() + + def __state_updating_pathname(self): + """Return the path to a flag file indicating that the image + catalog is being updated.""" + return os.path.join(self._statedir, self.__STATE_UPDATING_FILE) + + def __start_state_update(self): + """Called when we start updating the image catalog. 
Normally + returns False, but will return True if a previous update was + interrupted.""" + + # get the path to the image catalog update flag file + pathname = self.__state_updating_pathname() + + # if the flag file exists a previous update was interrupted so + # return 1 + if os.path.exists(pathname): + return True + + # create the flag file and return 0 + file_mode = misc.PKG_FILE_MODE + try: + with open(pathname, "w"): + os.chmod(pathname, file_mode) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise apx.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise apx.ReadOnlyFileSystemException(e.filename) + raise + return False + + def __end_state_update(self): + """Called when we're done updating the image catalog.""" + + # get the path to the image catalog update flag file + pathname = self.__state_updating_pathname() + + # delete the flag file. + try: + portable.remove(pathname) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise apx.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise apx.ReadOnlyFileSystemException(e.filename) + raise + + def __rebuild_image_catalogs(self, progtrack=None): + """Rebuilds the image catalogs based on the available publisher + catalogs.""" + + if self.version < 3: + raise apx.ImageFormatUpdateNeeded(self.root) + + if not progtrack: + progtrack = progress.NullProgressTracker() + + progtrack.cache_catalogs_start() + + publist = list(self.gen_publishers()) + + be_name, be_uuid = bootenv.BootEnv.get_be_name(self.root) + self.history.log_operation_start( + "rebuild-image-catalogs", be_name=be_name, be_uuid=be_uuid + ) + + # Mark all operations as occurring at this time. + op_time = datetime.datetime.utcnow() + + # The image catalogs need to be updated, but this is a bit + # tricky as previously known packages must remain known even + # if PKG_STATE_KNOWN is no longer true if any other state + # information is present. This is to allow freezing, etc. of + # package states on a permanent basis even if the package is + # no longer available from a publisher repository. However, + # this is only True of installed packages. + old_icat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + + # batch_mode is set to True here since without it, catalog + # population time is almost doubled (since the catalog is + # re-sorted and stats are generated for every operation). + # In addition, the new catalog is first created in a new + # temporary directory so that it can be moved into place + # at the very end of this process (to minimize the chance + # that failure or interruption will cause the image to be + # left in an inconsistent state). + tmp_state_root = self.temporary_dir() + + # Copy any regular files placed in the state directory + for p in os.listdir(self._statedir): + if p == self.__STATE_UPDATING_FILE: + # don't copy the state updating file + continue + fp = os.path.join(self._statedir, p) + if os.path.isfile(fp): + portable.copyfile(fp, os.path.join(tmp_state_root, p)) + + kcat = pkg.catalog.Catalog( + batch_mode=True, + meta_root=os.path.join(tmp_state_root, self.IMG_CATALOG_KNOWN), + sign=False, + ) + + # XXX if any of the below fails for any reason, the old 'known' + # catalog needs to be re-loaded so the client is in a consistent + # state. + + # All enabled publisher catalogs must be processed. 
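# Illustrative sketch, not part of the patch: the marker-file protocol
# that __start_state_update()/__end_state_update() above implement,
# reduced to a standalone context manager.  'flag_path' is an invented
# example name; file permissions and the pkg-specific exception
# translation are omitted.
import contextlib
import os

@contextlib.contextmanager
def state_update(flag_path):
    # A leftover flag file means an earlier update never finished.
    interrupted = os.path.exists(flag_path)
    if not interrupted:
        with open(flag_path, "w"):
            pass
    yield interrupted
    # Reached only when the body completes; an exception leaves the
    # flag behind so the next run can detect the interruption.
    os.remove(flag_path)

# with state_update("<statedir>/updating-flag") as was_interrupted:
#     if was_interrupted:
#         ...rebuild the catalogs from scratch...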
+ pub_cats = [(pub.prefix, pub.catalog) for pub in publist] + + # XXX For backwards compatibility, 'upgradability' of packages + # is calculated and stored based on whether a given pkg stem + # matches the newest version in the catalog. This is quite + # expensive (due to overhead), but at least the cost is + # consolidated here. This comparison is also cross-publisher, + # as it used to be. In the future, it could likely be improved + # by usage of the SAT solver. + newest = {} + for pfx, cat in [(None, old_icat)] + pub_cats: + for f in cat.fmris(last=True, pubs=pfx and [pfx] or EmptyI): + nver, snver = newest.get(f.pkg_name, (None, None)) + if f.version > nver: + newest[f.pkg_name] = (f.version, str(f.version)) + + # Next, copy all of the entries for the catalog parts that + # currently exist into the image 'known' catalog. + + # Iterator for source parts. + sparts = ( + (pfx, cat, name, cat.get_part(name, must_exist=True)) + for pfx, cat in pub_cats + for name in cat.parts + ) + + # Build list of installed packages based on actual state + # information just in case there is a state issue from an + # older client. + # Also stash away any old metadata, in particular we want + # the last-install and last-update times but maybe other + # metadata will be useful in the future. + inst_stems = {} + for t, entry in old_icat.tuple_entries(): + states = entry["metadata"]["states"] + if pkgdefs.PKG_STATE_INSTALLED not in states: + continue + pub, stem, ver = t + inst_stems.setdefault(pub, {}) + inst_stems[pub].setdefault(stem, {}) + inst_stems[pub][stem][ver] = { + "installed": False, + "metadata": entry["metadata"], + } + + # Create the new installed catalog in a temporary location. + icat = pkg.catalog.Catalog( + batch_mode=True, + meta_root=os.path.join(tmp_state_root, self.IMG_CATALOG_INSTALLED), + sign=False, + ) + + excludes = self.list_excludes() + + frozen_pkgs = dict( + [(p[0].pkg_name, p[0]) for p in self.get_frozen_list()] + ) + for pfx, cat, name, spart in sparts: + # 'spart' is the source part. + if spart is None: + # Client hasn't retrieved this part. + continue + + # New known part. + nkpart = kcat.get_part(name) + nipart = icat.get_part(name) + base = name.startswith("catalog.base.") + + # Avoid accessor overhead since these will be + # used for every entry. + cat_ver = cat.version + dp = cat.get_part("catalog.dependency.C", must_exist=True) + + for t, sentry in spart.tuple_entries(pubs=[pfx]): + pub, stem, ver = t + + installed = False + if ( + pub in inst_stems + and stem in inst_stems[pub] + and ver in inst_stems[pub][stem] + ): + installed = True + inst_stems[pub][stem][ver]["installed"] = True + + # copy() is too slow here and catalog entries + # are shallow so this should be sufficient. + entry = dict(six.iteritems(sentry)) + if not base: + # Nothing else to do except add the + # entry for non-base catalog parts. + nkpart.add( + metadata=entry, + op_time=op_time, + pub=pub, + stem=stem, + ver=ver, + ) + if installed: + nipart.add( + metadata=entry, + op_time=op_time, + pub=pub, + stem=stem, + ver=ver, ) + continue + + # Only the base catalog part stores package + # state information and/or other metadata. + mdata = entry.setdefault("metadata", {}) + states = mdata.setdefault("states", []) + states.append(pkgdefs.PKG_STATE_KNOWN) + + if cat_ver == 0: + states.append(pkgdefs.PKG_STATE_V0) + elif pkgdefs.PKG_STATE_V0 not in states: + # Assume V1 catalog source. 
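A sketch of the "newest version per stem" computation used above to decide which entries later receive the UPGRADABLE state. Versions are modelled as comparable tuples instead of pkg.version.Version objects.

def newest_by_stem(entries):
    """entries is an iterable of (stem, version) pairs drawn from every
    publisher catalog; return the highest version seen per stem."""
    newest = {}
    for stem, ver in entries:
        cur = newest.get(stem)
        if cur is None or ver > cur:
            newest[stem] = ver
    return newest

catalog = [("web/nginx", (1, 24)), ("web/nginx", (1, 26)), ("shell/bash", (5, 2))]
newest = newest_by_stem(catalog)
# An installed entry is upgradable when its version is below the newest one.
installed = ("web/nginx", (1, 24))
assert installed[1] < newest[installed[0]]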
+ states.append(pkgdefs.PKG_STATE_V1) + + if installed: + states.append(pkgdefs.PKG_STATE_INSTALLED) + # Preserve the dates of install/update + # if present in the old metadata + md = inst_stems[pub][stem][ver]["metadata"] + for key in ["last-install", "last-update"]: + if key in md: + entry["metadata"][key] = md[key] + # Preserve the manual installation + # flag, if present in the old metadata + ostates = set(md.get("states", set())) + if pkgdefs.PKG_STATE_MANUAL in ostates: + states.append(pkgdefs.PKG_STATE_MANUAL) + + nver, snver = newest.get(stem, (None, None)) + if snver is not None and ver != snver: + states.append(pkgdefs.PKG_STATE_UPGRADABLE) + + # Check if the package is frozen. + if stem in frozen_pkgs: + f_ver = frozen_pkgs[stem].version + if f_ver == ver or pkg.version.Version(ver).is_successor( + f_ver, constraint=pkg.version.CONSTRAINT_AUTO + ): + states.append(pkgdefs.PKG_STATE_FROZEN) + + # Determine if package is obsolete or has been + # renamed and mark with appropriate state. + dpent = None + if dp is not None: + dpent = dp.get_entry(pub=pub, stem=stem, ver=ver) + if dpent is not None: + for a in dpent["actions"]: + # Constructing action objects + # for every action would be a + # lot slower, so a simple string + # match is done first so that + # only interesting actions get + # constructed. + if not a.startswith("set"): + continue + if not ( + "pkg.obsolete" in a + or "pkg.renamed" in a + or "pkg.legacy" in a + ): + continue - for pfx in self.__alt_known_cat.publishers(): - # Include alternate package source publishers - # in result, and temporarily enable any - # disabled publishers that already exist in - # the image configuration. - try: - img_pub = self.cfg.publishers[pfx] - - if not img_pub.disabled: - # No override needed. - continue - new_pub = copy.copy(img_pub) - new_pub.disabled = False - - # Discard origins and mirrors to prevent - # their accidental use. - repo = new_pub.repository - repo.reset_origins() - repo.reset_mirrors() - except KeyError: - new_pub = alt_src_pubs[pfx] - - alt_pubs[pfx] = new_pub - - publishers = [ - alt_pubs.get(p.prefix, p) - for p in self.cfg.publishers.values() - ] - publishers.extend(( - p for p in alt_pubs.values() - if p not in publishers - )) - - for pub in publishers: - # Prepare publishers for transport usage; this must be - # done each time so that information reflects current - # image state. This is done whether or not the - # publisher is returned so that in-memory state is - # always current. - pub.meta_root = self._get_publisher_meta_root( - pub.prefix) - pub.transport = self.transport - if inc_disabled or not pub.disabled: - yield pub - - def get_publisher_ranks(self): - """Return dictionary of configured + enabled publishers and - unconfigured publishers which still have packages installed. - - Each entry contains a tuple of search order index starting at - 0, and a boolean indicating whether or not this publisher is - "sticky", and a boolean indicating whether or not the - publisher is enabled""" - - pubs = self.get_sorted_publishers(inc_disabled=False) - ret = dict([ - (pubs[i].prefix, (i, pubs[i].sticky, True)) - for i in range(0, len(pubs)) - ]) - - # Add any publishers for pkgs that are installed, - # but have been deleted. These publishers are implicitly - # not-sticky and disabled. 
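A sketch of the cheap string pre-filter used above: constructing an action object for every dependency-catalog line is expensive, so a plain substring test selects the few "set" actions that can carry pkg.obsolete/pkg.renamed/pkg.legacy before anything is parsed. parse_action() is a toy stand-in for pkg.actions.fromstr().

def parse_action(line):
    name, attrs = line.split(" ", 1)
    kv = dict(item.split("=", 1) for item in attrs.split())
    return {"type": name, "attrs": kv}

def interesting_states(action_lines):
    states = []
    for a in action_lines:
        if not a.startswith("set"):
            continue
        if not ("pkg.obsolete" in a or "pkg.renamed" in a or "pkg.legacy" in a):
            continue
        act = parse_action(a)
        if act["attrs"].get("value", "").lower() != "true":
            continue
        states.append(act["attrs"]["name"])
    return states

lines = [
    "depend fmri=pkg:/library/zlib type=require",
    "set name=pkg.obsolete value=true",
    "set name=pkg.summary value=example",
]
assert interesting_states(lines) == ["pkg.obsolete"]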
- for pub in self.get_installed_pubs(): - i = len(ret) - ret.setdefault(pub, (i, False, False)) - return ret - - def get_highest_ranked_publisher(self): - """Return the highest ranked publisher.""" - - pubs = self.cfg.get_property("property", - "publisher-search-order") - if pubs: - return self.get_publisher(prefix=pubs[0]) - for p in self.gen_publishers(): - return p - for p in self.get_installed_pubs(): - return publisher.Publisher(p) - return None - - def check_cert_validity(self, pubs=EmptyI): - """Validate the certificates of the specified publishers. - - Raise an exception if any of the certificates has expired or - is close to expiring.""" - - if not pubs: - pubs = self.gen_publishers() - - errors = [] - for p in pubs: - r = p.repository - for uri in r.origins: - if uri.ssl_cert: - try: - misc.validate_ssl_cert( - uri.ssl_cert, - prefix=p.prefix, - uri=uri) - except apx.ExpiredCertificate as e: - errors.append(e) - - if uri.ssl_key: - try: - if not os.path.exists( - uri.ssl_key): - raise apx.NoSuchKey( - uri.ssl_key, - publisher=p, - uri=uri) - except EnvironmentError as e: - raise apx._convert_error(e) - - if errors: - raise apx.ExpiredCertificates(errors) - - def has_publisher(self, prefix=None, alias=None): - """Returns a boolean value indicating whether a publisher - exists in the image configuration that matches the given - prefix or alias.""" - for pub in self.gen_publishers(inc_disabled=True): - if prefix == pub.prefix or (alias and - alias == pub.alias): - return True - return False - - def remove_publisher(self, prefix=None, alias=None, progtrack=None): - """Removes the publisher with the matching identity from the - image.""" - - if not progtrack: - progtrack = progress.NullProgressTracker() - - with self.locked_op("remove-publisher"): - pub = self.get_publisher(prefix=prefix, - alias=alias) - - self.cfg.remove_publisher(pub.prefix) - self.remove_publisher_metadata(pub, progtrack=progtrack) - self.save_config() - - def get_publishers(self, inc_disabled=True): - """Return a dictionary of configured publishers. This doesn't - include unconfigured publishers which still have packages - installed.""" - - return dict( - (p.prefix, p) - for p in self.gen_publishers(inc_disabled=inc_disabled) - ) - - def get_sorted_publishers(self, inc_disabled=True): - """Return a list of configured publishers sorted by rank. - This doesn't include unconfigured publishers which still have - packages installed.""" - - d = self.get_publishers(inc_disabled=inc_disabled) - names = self.cfg.get_property("property", - "publisher-search-order") - - # - # If someone has been editing the config file we may have - # unranked publishers. Also, as publisher come and go via the - # sysrepo we can end up with configured but unranked - # publishers. In either case just sort unranked publishers - # alphabetically. 
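A sketch of the rank table described by get_publisher_ranks above: configured publishers keep their search-order index, stickiness, and enabled flag, while publishers that only survive through installed packages are appended as non-sticky and disabled. The publisher names are made up.

def publisher_ranks(configured, installed_only):
    """configured is an ordered list of (prefix, sticky) pairs; installed_only
    is an iterable of prefixes with installed packages but no configuration."""
    ranks = {
        prefix: (i, sticky, True)
        for i, (prefix, sticky) in enumerate(configured)
    }
    for prefix in installed_only:
        ranks.setdefault(prefix, (len(ranks), False, False))
    return ranks

ranks = publisher_ranks(
    configured=[("omnios", True), ("extra.omnios", False)],
    installed_only=["legacy-pub"],
)
assert ranks["omnios"] == (0, True, True)
assert ranks["legacy-pub"] == (2, False, False)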
- # - unranked = set(d) - set(names) - ret = [ - d[n] - for n in names - if n in d - ] + [ - d[n] - for n in sorted(unranked) - ] - return ret - - def get_publisher(self, prefix=None, alias=None, origin=None): - for pub in self.gen_publishers(inc_disabled=True): - if prefix and prefix == pub.prefix: - return pub - elif alias and alias == pub.alias: - return pub - elif origin and pub.repository and \ - pub.repository.has_origin(origin): - return pub - - if prefix is None and alias is None and origin is None: - raise apx.UnknownPublisher(None) - - raise apx.UnknownPublisher(max(i for i in - [prefix, alias, origin] if i is not None)) - - def pub_search_before(self, being_moved, staying_put): - """Moves publisher "being_moved" to before "staying_put" - in search order. - - The caller is responsible for locking the image.""" - - self.cfg.change_publisher_search_order(being_moved, staying_put, - after=False) - - def pub_search_after(self, being_moved, staying_put): - """Moves publisher "being_moved" to after "staying_put" - in search order. - - The caller is responsible for locking the image.""" - - self.cfg.change_publisher_search_order(being_moved, staying_put, - after=True) - - def __apply_alt_pkg_sources(self, img_kcat): - pkg_pub_map = self.__alt_pkg_pub_map - if not pkg_pub_map or self.__alt_pkg_sources_loaded: - # No alternate sources to merge. - return - - # Temporarily merge the package metadata in the alternate - # known package catalog for packages not listed in the - # image's known catalog. - def merge_check(alt_kcat, pfmri, new_entry): - states = new_entry["metadata"]["states"] - if pkgdefs.PKG_STATE_INSTALLED in states: - # Not interesting; already installed. - return False, None - img_entry = img_kcat.get_entry(pfmri=pfmri) - if not img_entry is None: - # Already in image known catalog. - return False, None - return True, new_entry - - img_kcat.append(self.__alt_known_cat, cb=merge_check) - img_kcat.finalize() - - self.__alt_pkg_sources_loaded = True - self.transport.cfg.pkg_pub_map = self.__alt_pkg_pub_map - self.transport.cfg.alt_pubs = self.__alt_pubs - self.transport.cfg.reset_caches() - - def __cleanup_alt_pkg_certs(self): - """Private helper function to cleanup package certificate - information after use of temporary package data.""" - - if not self.__alt_pubs: - return - - # Cleanup publisher cert information; any certs not retrieved - # retrieved during temporary publisher use need to be expunged - # from the image configuration. - for pub in self.__alt_pubs: try: - ipub = self.cfg.publishers[pub.prefix] - except KeyError: - # Nothing to do. + act = pkg.actions.fromstr(a) + except pkg.actions.ActionError: + # If the action can't be + # parsed or is not yet + # supported, continue. 
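A sketch of the merge-check callback pattern used by __apply_alt_pkg_sources above when temporary alternate package sources are folded into the image's known catalog: entries that are already installed or already known are skipped. The catalogs are plain dicts here rather than pkg.catalog.Catalog objects.

def merge_alt_catalog(img_known, alt_known, installed):
    def merge_check(fmri, entry):
        if fmri in installed:
            return False      # already installed; not interesting
        if fmri in img_known:
            return False      # already in the image's known catalog
        return True
    for fmri, entry in alt_known.items():
        if merge_check(fmri, entry):
            img_known[fmri] = entry

img_known = {"pkg://omnios/shell/bash@5.2": {}}
alt_known = {
    "pkg://omnios/shell/bash@5.2": {},
    "pkg://alt/editor/vim@9.1": {},
}
merge_alt_catalog(img_known, alt_known, installed=set())
assert "pkg://alt/editor/vim@9.1" in img_known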
+ continue + + if act.attrs["value"].lower() != "true": + continue + + if act.attrs["name"] == "pkg.obsolete": + states.append(pkgdefs.PKG_STATE_OBSOLETE) + elif act.attrs["name"] == "pkg.renamed": + if not act.include_this(excludes, publisher=pub): continue + states.append(pkgdefs.PKG_STATE_RENAMED) + elif act.attrs["name"] == "pkg.legacy": + states.append(pkgdefs.PKG_STATE_LEGACY) - def set_alt_pkg_sources(self, alt_sources): - """Specifies an alternate source of package metadata to be - temporarily merged with image state so that it can be used - as part of packaging operations.""" - - if not alt_sources: - self.__init_catalogs() - self.__alt_pkg_pub_map = None - self.__alt_pubs = None - self.__alt_known_cat = None - self.__alt_pkg_sources_loaded = False - self.transport.cfg.pkg_pub_map = None - self.transport.cfg.alt_pubs = None - self.transport.cfg.reset_caches() - return - elif self.__alt_pkg_sources_loaded: - # Ensure existing alternate package source data - # is not part of temporary image state. - self.__init_catalogs() - - pkg_pub_map, alt_pubs, alt_kcat, ignored = alt_sources - self.__alt_pkg_pub_map = pkg_pub_map - self.__alt_pubs = alt_pubs - self.__alt_known_cat = alt_kcat - - def set_highest_ranked_publisher(self, prefix=None, alias=None, - pub=None): - """Sets the preferred publisher for packaging operations. - - 'prefix' is an optional string value specifying the name of - a publisher; ignored if 'pub' is provided. - - 'alias' is an optional string value specifying the alias of - a publisher; ignored if 'pub' is provided. - - 'pub' is an optional Publisher object identifying the - publisher to set as the preferred publisher. - - One of the above parameters must be provided. - - The caller is responsible for locking the image.""" - - if not pub: - pub = self.get_publisher(prefix=prefix, alias=alias) - if not self.cfg.allowed_to_move(pub): - raise apx.ModifyingSyspubException(_("Publisher '{0}' " - "is a system publisher and cannot be " - "moved.").format(pub)) - - pubs = self.get_sorted_publishers() - relative = None - for p in pubs: - # If we've gotten to the publisher we want to make - # highest ranked, then there's nothing to do because - # it's already as high as it can be. 
- if p == pub: - return - if self.cfg.allowed_to_move(p): - relative = p - break - assert relative, "Expected {0} to already be part of the " + \ - "search order:{1}".format(relative, ranks) - self.cfg.change_publisher_search_order(pub.prefix, - relative.prefix, after=False) - - def set_property(self, prop_name, prop_value): - with self.locked_op("set-property"): - self.cfg.set_property("property", prop_name, - prop_value) - self.save_config() - - def set_properties(self, properties): - properties = { "property": properties } - with self.locked_op("set-property"): - self.cfg.set_properties(properties) - self.save_config() - - def get_property(self, prop_name): - return self.cfg.get_property("property", prop_name) - - def has_property(self, prop_name): - try: - self.cfg.get_property("property", prop_name) - return True - except cfg.ConfigError: - return False - - def delete_property(self, prop_name): - with self.locked_op("unset-property"): - self.cfg.remove_property("property", prop_name) - self.save_config() - - def add_property_value(self, prop_name, prop_value): - with self.locked_op("add-property-value"): - self.cfg.add_property_value("property", prop_name, - prop_value) - self.save_config() - - def remove_property_value(self, prop_name, prop_value): - with self.locked_op("remove-property-value"): - self.cfg.remove_property_value("property", prop_name, - prop_value) - self.save_config() - - def destroy(self): - """Destroys the image; image object should not be used - afterwards.""" - - if not self.imgdir or not os.path.exists(self.imgdir): - return - - if os.path.abspath(self.imgdir) == "/": - # Paranoia. - return - - try: - shutil.rmtree(self.imgdir) - except EnvironmentError as e: - raise apx._convert_error(e) - - def properties(self): - if not self.cfg: - raise apx.ImageCfgEmptyError(self.root) - return list(self.cfg.get_index()["property"].keys()) - - def add_publisher(self, pub, refresh_allowed=True, progtrack=None, - approved_cas=EmptyI, revoked_cas=EmptyI, search_after=None, - search_before=None, search_first=None, unset_cas=EmptyI): - """Adds the provided publisher object to the image - configuration. - - 'refresh_allowed' is an optional, boolean value indicating - whether the publisher's metadata should be retrieved when adding - it to the image's configuration. - - 'progtrack' is an optional ProgressTracker object.""" - - with self.locked_op("add-publisher"): - return self.__add_publisher(pub, - refresh_allowed=refresh_allowed, - progtrack=progtrack, approved_cas=EmptyI, - revoked_cas=EmptyI, search_after=search_after, - search_before=search_before, - search_first=search_first, unset_cas=EmptyI) - - def __update_publisher_catalogs(self, pub, progtrack=None, - refresh_allowed=True): - # Ensure that if the publisher's meta directory already - # exists for some reason that the data within is not - # used. - self.remove_publisher_metadata(pub, progtrack=progtrack, - rebuild=False) - - repo = pub.repository - if refresh_allowed and repo.origins: - try: - # First, verify that the publisher has a - # valid pkg(7) repository. - self.transport.valid_publisher_test(pub) - pub.validate_config() - self.refresh_publishers(pubs=[pub], - progtrack=progtrack, - ignore_unreachable=False) - except Exception as e: - # Remove the newly added publisher since - # it is invalid or the retrieval failed. - if not pub.sys_pub: - self.cfg.remove_publisher(pub.prefix) - raise - except: - # Remove the newly added publisher since - # the retrieval failed. 
- if not pub.sys_pub: - self.cfg.remove_publisher(pub.prefix) - raise - - def __add_publisher(self, pub, refresh_allowed=True, progtrack=None, - approved_cas=EmptyI, revoked_cas=EmptyI, search_after=None, - search_before=None, search_first=None, unset_cas=EmptyI): - """Private version of add_publisher(); caller is responsible - for locking.""" - - assert (not search_after and not search_before) or \ - (not search_after and not search_first) or \ - (not search_before and not search_first) - - if self.version < self.CURRENT_VERSION: - raise apx.ImageFormatUpdateNeeded(self.root) - - for p in self.cfg.publishers.values(): - if pub.prefix == p.prefix or \ - pub.prefix == p.alias or \ - pub.alias and (pub.alias == p.alias or - pub.alias == p.prefix): - raise apx.DuplicatePublisher(pub) - - if not progtrack: - progtrack = progress.NullProgressTracker() - - # Must assign this first before performing operations. - pub.meta_root = self._get_publisher_meta_root( - pub.prefix) - pub.transport = self.transport - - # Before continuing, validate SSL information. - try: - self.check_cert_validity(pubs=[pub]) - except apx.ExpiringCertificate as e: - logger.error(str(e)) + mdata["states"] = states - self.cfg.publishers[pub.prefix] = pub + # Add base entries. + nkpart.add( + metadata=entry, op_time=op_time, pub=pub, stem=stem, ver=ver + ) + if installed: + nipart.add( + metadata=entry, + op_time=op_time, + pub=pub, + stem=stem, + ver=ver, + ) + + # Now add installed packages to list of known packages using + # previous state information. While doing so, track any + # new entries as the versions for the stem of the entry will + # need to be passed to finalize() for sorting. + final_fmris = [] + for name in old_icat.parts: + # Old installed part. + ipart = old_icat.get_part(name, must_exist=True) + + # New known part. + nkpart = kcat.get_part(name) + + # New installed part. + nipart = icat.get_part(name) + + base = name.startswith("catalog.base.") + + mdata = None + for t, entry in ipart.tuple_entries(): + pub, stem, ver = t + + if ( + pub not in inst_stems + or stem not in inst_stems[pub] + or ver not in inst_stems[pub][stem] + or inst_stems[pub][stem][ver]["installed"] + ): + # Entry is no longer valid or is already + # known. + continue + + if base: + mdata = entry["metadata"] + states = set(mdata["states"]) + states.discard(pkgdefs.PKG_STATE_KNOWN) + + nver, snver = newest.get(stem, (None, None)) + if not nver or (snver is not None and ver == snver): + states.discard(pkgdefs.PKG_STATE_UPGRADABLE) + elif snver is not None: + states.add(pkgdefs.PKG_STATE_UPGRADABLE) + # Check if the package is frozen. + if stem in frozen_pkgs: + f_ver = frozen_pkgs[stem].version + if f_ver == ver or pkg.version.Version( + ver + ).is_successor( + f_ver, constraint=pkg.version.CONSTRAINT_AUTO + ): + states.add(pkgdefs.PKG_STATE_FROZEN) + else: + states.discard(pkgdefs.PKG_STATE_FROZEN) + + mdata["states"] = list(states) + + # Add entries. + nkpart.add( + metadata=entry, op_time=op_time, pub=pub, stem=stem, ver=ver + ) + nipart.add( + metadata=entry, op_time=op_time, pub=pub, stem=stem, ver=ver + ) + final_fmris.append( + pkg.fmri.PkgFmri(name=stem, publisher=pub, version=ver) + ) - self.__update_publisher_catalogs(pub, progtrack=progtrack, - refresh_allowed=refresh_allowed) + # Save the new catalogs. + for cat in kcat, icat: + misc.makedirs(cat.meta_root) + cat.finalize(pfmris=final_fmris) + cat.save() + + # Next, preserve the old installed state dir, rename the + # new one into place, and then remove the old one. 
+ orig_state_root = self.salvage(self._statedir, full_path=True) + portable.rename(tmp_state_root, self._statedir) + shutil.rmtree(orig_state_root, True) + + # Ensure in-memory catalogs get reloaded. + self.__init_catalogs() + + self.update_last_modified() + progtrack.cache_catalogs_done() + self.history.log_operation_end() + + def refresh_publishers( + self, + full_refresh=False, + immediate=False, + pubs=None, + progtrack=None, + ignore_unreachable=True, + ): + """Refreshes the metadata (e.g. catalog) for one or more + publishers. Callers are responsible for locking the image. + + 'full_refresh' is an optional boolean value indicating whether + a full retrieval of publisher metadata (e.g. catalogs) or only + an update to the existing metadata should be performed. When + True, 'immediate' is also set to True. + + 'immediate' is an optional boolean value indicating whether the + a refresh should occur now. If False, a publisher's selected + repository will only be checked for updates if the update + interval period recorded in the image configuration has been + exceeded. + + 'pubs' is a list of publisher prefixes or publisher objects + to refresh. Passing an empty list or using the default value + implies all publishers. + + 'ignore_unreachable' is an optional boolean value indicating + whether unreachable repositories should be ignored. If True, + errors contacting this repository are stored in the transport + but no exception is raised, allowing an operation to continue + if an unneeded repository is not online.""" + + if self.version < 3: + raise apx.ImageFormatUpdateNeeded(self.root) + + if not progtrack: + progtrack = progress.NullProgressTracker() + + be_name, be_uuid = bootenv.BootEnv.get_be_name(self.root) + self.history.log_operation_start( + "refresh-publishers", be_name=be_name, be_uuid=be_uuid + ) + + pubs_to_refresh = [] + + if not pubs: + # Omit disabled publishers. + pubs = [p for p in self.gen_publishers()] + + if not pubs: + self.__rebuild_image_catalogs(progtrack=progtrack) + return + + for pub in pubs: + p = pub + if not isinstance(p, publisher.Publisher): + p = self.get_publisher(prefix=p) + if p.disabled: + e = apx.DisabledPublisher(p) + self.history.log_operation_end(error=e) + raise e + pubs_to_refresh.append(p) + + if not pubs_to_refresh: + self.history.log_operation_end(result=history.RESULT_NOTHING_TO_DO) + return + + # Verify validity of certificates before attempting network + # operations. + try: + self.check_cert_validity(pubs=pubs_to_refresh) + except apx.ExpiringCertificate as e: + logger.error(str(e)) + + try: + # Ensure Image directory structure is valid. + self.mkdirs() + except Exception as e: + self.history.log_operation_end(error=e) + raise + + progtrack.refresh_start(len(pubs_to_refresh), full_refresh=full_refresh) + + failed = [] + total = 0 + succeeded = set() + updated = self.__start_state_update() + for pub in pubs_to_refresh: + total += 1 + progtrack.refresh_start_pub(pub) + try: + changed, e = pub.refresh( + full_refresh=full_refresh, + immediate=immediate, + progtrack=progtrack, + ) + if changed: + updated = True + + if not ignore_unreachable and e: + failed.append((pub, e)) + continue + + except apx.PermissionsException as e: + failed.append((pub, e)) + # No point in continuing since no data can + # be written. 
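A sketch of the swap performed above: the new state is written into a scratch directory and only moved over the live state directory once complete, so an interrupted rebuild never leaves a half-written state directory behind. rebuild_state() and write_new_state() are hypothetical names.

import os
import shutil
import tempfile

def rebuild_state(statedir, write_new_state):
    # Build the new state off to the side, next to the live directory so the
    # final rename stays on the same filesystem.
    tmp_root = tempfile.mkdtemp(prefix=".state-", dir=os.path.dirname(statedir))
    write_new_state(tmp_root)
    backup = statedir + ".old"
    os.rename(statedir, backup)        # preserve the old state
    os.rename(tmp_root, statedir)      # swap the new state into place
    shutil.rmtree(backup, ignore_errors=True)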
+ break + except apx.ApiException as e: + failed.append((pub, e)) + continue + finally: + progtrack.refresh_end_pub(pub) + succeeded.add(pub.prefix) + + progtrack.refresh_done() + + if updated: + self.__rebuild_image_catalogs(progtrack=progtrack) + # Ensure any configuration or metadata changes made + # during refresh are reflected in on-disk state. + self.save_config() + else: + self.__end_state_update() + + if failed: + e = apx.CatalogRefreshException(failed, total, len(succeeded)) + self.history.log_operation_end(error=e) + raise e + + if not updated: + self.history.log_operation_end(result=history.RESULT_NOTHING_TO_DO) + return + self.history.log_operation_end() + + def _get_publisher_meta_dir(self): + if self.version >= 3: + return IMG_PUB_DIR + return "catalog" + + def _get_publisher_cache_root(self, prefix): + return os.path.join(self.imgdir, "cache", "publisher", prefix) + + def _get_publisher_meta_root(self, prefix): + return os.path.join(self.imgdir, self._get_publisher_meta_dir(), prefix) + + def remove_publisher_metadata(self, pub, progtrack=None, rebuild=True): + """Removes the metadata for the specified publisher object, + except data for installed packages. + + 'pub' is the object of the publisher to remove the data for. + + 'progtrack' is an optional ProgressTracker object. + + 'rebuild' is an optional boolean specifying whether image + catalogs should be rebuilt after removing the publisher's + metadata. + """ - for ca in approved_cas: - try: - ca = os.path.abspath(ca) - fh = open(ca, "r") - s = fh.read() - fh.close() - except EnvironmentError as e: - if e.errno == errno.ENOENT: - raise apx.MissingFileArgumentException( - ca) - raise apx._convert_error(e) - pub.approve_ca_cert(s, manual=True) - - for hsh in revoked_cas: - pub.revoke_ca_cert(hsh) - - for hsh in unset_cas: - pub.unset_ca_cert(hsh) - - if search_first: - self.set_highest_ranked_publisher(prefix=pub.prefix) - elif search_before: - self.pub_search_before(pub.prefix, search_before) - elif search_after: - self.pub_search_after(pub.prefix, search_after) - - # Only after success should the configuration be saved. - self.save_config() - - def __process_verify(self, act, path, path_only, fmri, excludes, - vardrate_excludes, progresstracker, verifypaths=None, - overlaypaths=None, **kwargs): - errors = [] - warnings = [] - info = [] - if act.include_this(excludes, publisher=fmri.publisher): - if not path_only: - errors, warnings, info = act.verify( - self, pfmri=fmri, **kwargs) - elif path in verifypaths or path in overlaypaths: - if path in verifypaths: - progresstracker.plan_add_progress( - progresstracker.PLAN_PKG_VERIFY) - - errors, warnings, info = act.verify( - self, pfmri=fmri, **kwargs) - # It's safe to immediately discard this - # match as only one action can deliver a - # path with overlay=allow and only one with - # overlay=true. - overlaypaths.discard(path) - if act.attrs.get("overlay") == "allow": - overlaypaths.add(path) - verifypaths.discard(path) - elif act.include_this(vardrate_excludes, - publisher=fmri.publisher) and not act.refcountable: - # Verify that file that is faceted out does not - # exist. Exclude actions which may be delivered - # from multiple packages. - if path is not None and os.path.exists(os.path.join( - self.root, path)): - errors.append(_("File should not exist")) - else: - # Action that is not applicable to image variant - # or has been dehydrated. 
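A sketch of the refresh bookkeeping above: each publisher is refreshed in turn, failures are collected rather than aborting the whole run (except for permission errors, after which nothing further can be written), and the image catalogs are rebuilt only if at least one publisher actually changed. refresh_one() and rebuild_catalogs() are hypothetical helpers.

class PermissionsError(Exception):
    pass

def refresh_all(publishers, refresh_one, rebuild_catalogs):
    failed, succeeded, updated = [], set(), False
    for pub in publishers:
        try:
            changed, err = refresh_one(pub)
            if changed:
                updated = True
            if err:
                failed.append((pub, err))
                continue
        except PermissionsError as e:
            failed.append((pub, e))
            break               # nothing can be written; stop early
        succeeded.add(pub)
    if updated:
        rebuild_catalogs()
    return failed, succeeded, updated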
- return None, None, None, True - return errors, warnings, info, False - - def verify(self, fmri, progresstracker, verifypaths=None, - overlaypaths=None, single_act=None, **kwargs): - """Generator that returns a tuple of the form (action, errors, - warnings, info) if there are any error, warning, or other - messages about an action contained within the specified - package. Where the returned messages are lists of strings - indicating fatal problems, potential issues (that can be - ignored), or extra information to be displayed respectively. - - 'fmri' is the fmri of the package to verify. - - 'progresstracker' is a ProgressTracker object. - - 'verifypaths' is the set of paths to verify. - - 'overlaypaths' is the set of overlaying path to verify. - - 'single_act' is the only action of the specified fmri to - verify. - - 'kwargs' is a dict of additional keyword arguments to be passed - to each action verification routine.""" - - path_only = bool(verifypaths or overlaypaths) - # pkg verify only looks at actions that have not been dehydrated. - excludes = self.list_excludes() - vardrate_excludes = [self.cfg.variants.allow_action] - dehydrate = self.cfg.get_property("property", "dehydrated") - if dehydrate: - func = self.get_dehydrated_exclude_func(dehydrate) - excludes.append(func) - vardrate_excludes.append(func) - - # If single_act is set, only that action will be processed. - if single_act: - overlay = None - if single_act.attrs.get("overlay") == "allow": - overlay = "overlaid" - elif single_act.attrs.get("overlay") == "true": - overlay = "overlaying" - progresstracker.plan_add_progress( - progresstracker.PLAN_PKG_VERIFY, nitems=0) - path = single_act.attrs.get("path") - errors, warnings, info, ignore = \ - self.__process_verify(single_act, - path, path_only, fmri, - excludes, vardrate_excludes, - progresstracker, verifypaths=verifypaths, - overlaypaths=overlaypaths, **kwargs) - if (errors or warnings or info) and not ignore: - yield single_act, errors, \ - warnings, info, overlay - return + if self.version < 4: + # Older images don't require fine-grained deletion. + pub.remove_meta_root() + if rebuild: + self.__rebuild_image_catalogs(progtrack=progtrack) + return + + # Build a list of paths that shouldn't be removed because they + # belong to installed packages. + excluded = [ + self.get_manifest_path(f) + for f in self.gen_installed_pkgs() + if f.publisher == pub.prefix + ] + + if not excluded: + pub.remove_meta_root() + else: + try: + # Discard all publisher metadata except + # package manifests as a first pass. + for entry in os.listdir(pub.meta_root): + if entry == "pkg": + continue + + target = os.path.join(pub.meta_root, entry) + if os.path.isdir(target): + shutil.rmtree(target, ignore_errors=True) + else: + portable.remove(target) + + # Build the list of directories that can't be + # removed. + exdirs = [os.path.dirname(e) for e in excluded] + + # Now try to discard only package manifests + # that aren't for installed packages. + mroot = os.path.join(pub.meta_root, "pkg") + for pdir in os.listdir(mroot): + proot = os.path.join(mroot, pdir) + if proot not in exdirs: + # This removes all manifest data + # for a given package stem. + shutil.rmtree(proot, ignore_errors=True) + continue + + # Remove only manifest data for packages + # that are not installed. + for mname in os.listdir(proot): + mpath = os.path.join(proot, mname) + if mpath not in excluded: + portable.remove(mpath) + + # Finally, dump any cache data for this + # publisher if possible. 
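A sketch of the selective cleanup performed by remove_publisher_metadata above: everything under a publisher's metadata root is removed except the manifests of still-installed packages and the directories that hold them. The directory layout is a simplification of the real meta_root/pkg/<stem>/<version> structure.

import os
import shutil

def prune_publisher_meta(meta_root, keep_manifests):
    keep_dirs = {os.path.dirname(p) for p in keep_manifests}
    for entry in os.listdir(meta_root):
        target = os.path.join(meta_root, entry)
        if entry != "pkg":
            # Catalog data and other metadata can always go.
            if os.path.isdir(target):
                shutil.rmtree(target, ignore_errors=True)
            else:
                os.remove(target)
            continue
        for pdir in os.listdir(target):
            proot = os.path.join(target, pdir)
            if proot not in keep_dirs:
                shutil.rmtree(proot, ignore_errors=True)
                continue
            for mname in os.listdir(proot):
                mpath = os.path.join(proot, mname)
                if mpath not in keep_manifests:
                    os.remove(mpath)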
+ shutil.rmtree( + self._get_publisher_cache_root(pub.prefix), + ignore_errors=True, + ) + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise apx._convert_error(e) + + if rebuild: + self.__rebuild_image_catalogs(progtrack=progtrack) + + def gen_installed_pkg_names(self, anarchy=True): + """A generator function that produces FMRI strings as it + iterates over the list of installed packages. This is + faster than gen_installed_pkgs when only the FMRI string + is needed.""" + + cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + for f in cat.fmris(objects=False): + if anarchy: + # Catalog entries always have publisher prefix. + yield "pkg:/{0}".format(f[6:].split("/", 1)[-1]) + continue + yield f + + def gen_installed_pkgs(self, pubs=EmptyI, ordered=False): + """Return an iteration through the installed packages.""" + + cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + for f in cat.fmris(pubs=pubs, ordered=ordered): + yield f + + def count_installed_pkgs(self, pubs=EmptyI): + """Return the number of installed packages.""" + cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + assert cat.package_count == cat.package_version_count + return sum( + pkg_count + for (pub, pkg_count, _ignored) in cat.get_package_counts_by_pub( + pubs=pubs + ) + ) + + def gen_tracked_stems(self): + """Return an iteration through all the tracked pkg stems + in the set of currently installed packages. Return value + is group pkg fmri, stem""" + cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + excludes = self.list_excludes() + + for f in cat.fmris(): + for a in cat.get_entry_actions( + f, [pkg.catalog.Catalog.DEPENDENCY], excludes=excludes + ): + if a.name == "depend" and a.attrs["type"] == "group": + yield (f, self.strtofmri(a.attrs["fmri"]).pkg_name) + + def _create_fast_lookups(self, progtrack=None): + """Create an on-disk database mapping action name and key + attribute value to the action string comprising the unique + attributes of the action, for all installed actions. This is + done with a file mapping the tuple to an offset into a second + file, where those actions are kept. Once the offsets are loaded + into memory, it is simple to seek into the second file to the + given offset and read until you hit an action that doesn't + match.""" + + if not progtrack: + progtrack = progress.NullProgressTracker() + + self.__actdict = None + self.__actdict_timestamp = None + stripped_path = os.path.join( + self.__action_cache_dir, "actions.stripped" + ) + offsets_path = os.path.join(self.__action_cache_dir, "actions.offsets") + conflicting_keys_path = os.path.join( + self.__action_cache_dir, "keys.conflicting" + ) + + excludes = self.list_excludes() + heap = [] + + # nsd is the "name-space dictionary." It maps action name + # spaces (see action.generic for more information) to + # dictionaries which map keys to pairs which contain an action + # with that key and the pfmri of the package which delivered the + # action. 
+ nsd = {} + + from heapq import heappush, heappop + + progtrack.job_start(progtrack.JOB_FAST_LOOKUP) + + for pfmri in self.gen_installed_pkgs(): + progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) + m = self.get_manifest(pfmri, ignore_excludes=True) + for act in m.gen_actions(excludes=excludes): + if not act.globally_identical: + continue + act.strip() + heappush(heap, (act.name, act.attrs[act.key_attr], pfmri, act)) + nsd.setdefault(act.namespace_group, {}) + nsd[act.namespace_group].setdefault(act.attrs[act.key_attr], []) + nsd[act.namespace_group][act.attrs[act.key_attr]].append( + (act, pfmri) + ) - try: - pub = self.get_publisher(prefix=fmri.publisher) - except apx.UnknownPublisher: - # Since user removed publisher, assume this is the same - # as if they had set signature-policy ignore for the - # publisher. - sig_pol = None + progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) + + # If we can't write the temporary files, then there's no point + # in producing actdict because it depends on a synchronized + # stripped actions file. + try: + actdict = {} + sf, sp = self.temporary_file(close=False) + of, op = self.temporary_file(close=False) + bf, bp = self.temporary_file(close=False) + + sf = os.fdopen(sf, "w") + of = os.fdopen(of, "w") + bf = os.fdopen(bf, "w") + + # We need to make sure the files are coordinated. + timestamp = int(time.time()) + sf.write("VERSION 1\n{0}\n".format(timestamp)) + of.write("VERSION 2\n{0}\n".format(timestamp)) + # The conflicting keys file doesn't need a timestamp + # because it's not coordinated with the stripped or + # offsets files and the result of loading it isn't + # reused by this class. + bf.write("VERSION 1\n") + + cnt, offset_update_bytes = 0, 0 + last_name, last_key, last_offset = None, None, sf.tell() + while heap: + # This is a tight loop, so try to avoid burning + # CPU calling into the progress tracker + # excessively. + if len(heap) % 100 == 0: + progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) + item = heappop(heap) + fmri, act = item[2:] + key = act.attrs[act.key_attr] + if act.name != last_name or key != last_key: + if last_name is None: + assert last_key is None + cnt += 1 + last_name = act.name + last_key = key + else: + assert cnt > 0 + of.write( + "{0} {1} {2} {3}\n".format( + last_name, last_offset, cnt, last_key + ) + ) + actdict[(last_name, last_key)] = last_offset, cnt + last_name, last_key = act.name, key + last_offset += offset_update_bytes + offset_update_bytes = 0 + cnt = 1 else: - sig_pol = self.signature_policy.combine( - pub.signature_policy) - - if not path_only: - progresstracker.plan_add_progress( - progresstracker.PLAN_PKG_VERIFY) - - manf = self.get_manifest(fmri, ignore_excludes=True) - sigs = list(manf.gen_actions_by_type("signature", - excludes=self.list_excludes())) - if sig_pol and (sigs or sig_pol.name != "ignore"): - # Only perform signature verification logic if there are - # signatures or if signature-policy is not 'ignore'. - try: - # Signature verification must be done using all - # the actions from the manifest, not just the - # ones for this image's variants. - sig_pol.process_signatures(sigs, - manf.gen_actions(), pub, self.trust_anchors, - self.cfg.get_policy( - "check-certificate-revocation")) - except apx.SigningException as e: - e.pfmri = fmri - yield e.sig, [e], [], [], None - except apx.InvalidResourceLocation as e: - yield None, [e], [], [], None - - def mediation_allowed(act): - """Helper function to determine if the mediation - delivered by a link is allowed. 
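A sketch of the offset index built by _create_fast_lookups above: action records are emitted in sorted (name, key) order, and for each group the byte offset of its first record plus the record count is stored, so a later lookup can seek straight to the group and read that many lines. The records here are simple strings rather than stripped pkg actions.

import io

def write_index(records):
    """records is a sorted list of (name, key, payload) tuples."""
    data = io.StringIO()
    index = {}                       # (name, key) -> (offset, count)
    for name, key, payload in records:
        if (name, key) not in index:
            index[(name, key)] = (data.tell(), 0)
        off, cnt = index[(name, key)]
        index[(name, key)] = (off, cnt + 1)
        data.write(f"{name} {key} {payload}\n")
    return data.getvalue(), index

text, index = write_index([
    ("file", "usr/bin/ls", "pkg:/system/coreutils"),
    ("file", "usr/bin/ls", "pkg:/compat/gnu-coreutils"),
    ("link", "usr/bin/vi", "pkg:/editor/vim"),
])
off, cnt = index[("file", "usr/bin/ls")]
assert cnt == 2 and text[off:].startswith("file usr/bin/ls")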
If it is, then - the link should be verified. (Yes, this does mean - that the non-existence of links is not verified.) - """ - - mediator = act.attrs.get("mediator") - if not mediator or mediator not in self.cfg.mediators: - # Link isn't mediated or mediation is unknown. - return True - - cfg_med_version = self.cfg.mediators[mediator].get( - "version") - cfg_med_impl = self.cfg.mediators[mediator].get( - "implementation") - - med_version = act.attrs.get("mediator-version") - if med_version: - med_version = pkg.version.Version( - med_version) - med_impl = act.attrs.get("mediator-implementation") - - return med_version == cfg_med_version and \ - med.mediator_impl_matches(med_impl, cfg_med_impl) - - for act in manf.gen_actions(): - path = act.attrs.get("path") - # Defer verification on actions with 'overlay' - # attribute = 'allow'. - if not path_only: - if act.attrs.get("overlay") == "true": - yield act, [], [], [], "overlaying" - continue - elif act.attrs.get("overlay"): - yield act, [], [], [], "overlaid" - continue - - - progresstracker.plan_add_progress( - progresstracker.PLAN_PKG_VERIFY, nitems=0) - - if (act.name == "link" or - act.name == "hardlink") and \ - not mediation_allowed(act): - # Link doesn't match configured - # mediation, so shouldn't be verified. - continue - - errors, warnings, info, ignore = self.__process_verify( - act, path, path_only, fmri, excludes, - vardrate_excludes, progresstracker, - verifypaths=verifypaths, overlaypaths=overlaypaths, - **kwargs) - if (errors or warnings or info) and not ignore: - yield act, errors, warnings, info, None - - def image_config_update(self, new_variants, new_facets, new_mediators): - """update variants in image config""" - - if new_variants is not None: - self.cfg.variants.update(new_variants) - if new_facets is not None: - self.cfg.facets = new_facets - if new_mediators is not None: - self.cfg.mediators = new_mediators - self.save_config() - - def __verify_manifest(self, fmri, mfstpath, alt_pub=None): - """Verify a manifest. The caller must supply the FMRI - for the package in 'fmri', as well as the path to the - manifest file that will be verified.""" - + cnt += 1 + sf_line = f"{fmri} {act}\n" + sf.write(sf_line) + offset_update_bytes += len(sf_line.encode("utf-8")) + if last_name is not None: + assert last_key is not None + assert last_offset is not None + assert cnt > 0 + of.write( + "{0} {1} {2} {3}\n".format( + last_name, last_offset, cnt, last_key + ) + ) + actdict[(last_name, last_key)] = last_offset, cnt + + progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) + + bad_keys = imageplan.ImagePlan._check_actions(nsd) + for k in sorted(bad_keys): + bf.write("{0}\n".format(k)) + + progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) + sf.close() + of.close() + bf.close() + os.chmod(sp, misc.PKG_FILE_MODE) + os.chmod(op, misc.PKG_FILE_MODE) + os.chmod(bp, misc.PKG_FILE_MODE) + except BaseException as e: + try: + os.unlink(sp) + os.unlink(op) + os.unlink(bp) + except: + pass + raise + + progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) + + # Finally, rename the temporary files into their final place. + # If we have any problems, do our best to remove them, and we'll + # try to recreate them on the read-side. 
+ try: + if not os.path.exists(self.__action_cache_dir): + os.makedirs(self.__action_cache_dir) + portable.rename(sp, stripped_path) + portable.rename(op, offsets_path) + portable.rename(bp, conflicting_keys_path) + except EnvironmentError as e: + if e.errno == errno.EACCES or e.errno == errno.EROFS: + self.__action_cache_dir = self.temporary_dir() + stripped_path = os.path.join( + self.__action_cache_dir, "actions.stripped" + ) + offsets_path = os.path.join( + self.__action_cache_dir, "actions.offsets" + ) + conflicting_keys_path = os.path.join( + self.__action_cache_dir, "keys.conflicting" + ) + portable.rename(sp, stripped_path) + portable.rename(op, offsets_path) + portable.rename(bp, conflicting_keys_path) + else: + exc_info = sys.exc_info() try: - return self.transport._verify_manifest(fmri, - mfstpath=mfstpath, pub=alt_pub) - except InvalidContentException: - return False - - def has_manifest(self, pfmri, alt_pub=None): - """Check to see if the manifest for pfmri is present on disk and - has the correct hash.""" - - pth = self.get_manifest_path(pfmri) - on_disk = os.path.exists(pth) - - if not on_disk or \ - self.is_pkg_installed(pfmri) or \ - self.__verify_manifest(fmri=pfmri, mfstpath=pth, alt_pub=alt_pub): - return on_disk - return False - - def get_license_dir(self, pfmri): - """Return path to package license directory.""" - if self.version == self.CURRENT_VERSION: - # Newer image format stores license files per-stem, - # instead of per-stem and version, so that transitions - # between package versions don't require redelivery - # of license files. - return os.path.join(self.imgdir, "license", - pfmri.get_dir_path(stemonly=True)) - # Older image formats store license files in the manifest cache - # directory. - return self.get_manifest_dir(pfmri) - - def __get_installed_pkg_publisher(self, pfmri): - """Returns the publisher for the FMRI of an installed package - or None if the package is not installed. - """ - for f in self.gen_installed_pkgs(): - if f.pkg_name == pfmri.pkg_name: - return f.publisher + os.unlink(stripped_path) + os.unlink(offsets_path) + os.unlink(conflicting_keys_path) + except: + pass + six.reraise(exc_info[0], exc_info[1], exc_info[2]) + + progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) + progtrack.job_done(progtrack.JOB_FAST_LOOKUP) + return actdict, timestamp + + def _remove_fast_lookups(self): + """Remove on-disk database created by _create_fast_lookups. + Should be called before updating image state to prevent the + client from seeing stale state if _create_fast_lookups is + interrupted.""" + + for fname in ( + "actions.stripped", + "actions.offsets", + "keys.conflicting", + ): + try: + portable.remove(os.path.join(self.__action_cache_dir, fname)) + except EnvironmentError as e: + if e.errno == errno.ENOENT: + continue + raise apx._convert_error(e) + + def _load_actdict(self, progtrack): + """Read the file of offsets created in _create_fast_lookups() + and return the dictionary mapping action name and key value to + offset.""" + + try: + of = open( + os.path.join(self.__action_cache_dir, "actions.offsets"), "r" + ) + except IOError as e: + if e.errno != errno.ENOENT: + raise + actdict, otimestamp = self._create_fast_lookups() + assert actdict is not None + self.__actdict = actdict + self.__actdict_timestamp = otimestamp + return actdict + + # Make sure the files are paired, and try to create them if not. 
+ oversion = of.readline().rstrip() + otimestamp = of.readline().rstrip() + + # The original action.offsets file existed and had the same + # timestamp as the stored actdict, so that actdict can be + # reused. + if self.__actdict and otimestamp == self.__actdict_timestamp: + return self.__actdict + + sversion, stimestamp = self._get_stripped_actions_file(internal=True) + + # If we recognize neither file's version or their timestamps + # don't match, then we blow them away and try again. + if ( + oversion != "VERSION 2" + or sversion != "VERSION 1" + or stimestamp != otimestamp + ): + of.close() + actdict, otimestamp = self._create_fast_lookups() + assert actdict is not None + self.__actdict = actdict + self.__actdict_timestamp = otimestamp + return actdict + + # At this point, the original actions.offsets file existed, no + # actdict was saved in the image, the versions matched what was + # expected, and the timestamps of the actions.offsets and + # actions.stripped files matched, so the actions.offsets file is + # parsed to generate actdict. + actdict = {} + + for line in of: + actname, offset, cnt, key_attr = line.rstrip().split(None, 3) + off = int(offset) + actdict[(actname, key_attr)] = (off, int(cnt)) + + # This is a tight loop, so try to avoid burning + # CPU calling into the progress tracker excessively. + # Since we are already using the offset, we use that + # to damp calls back into the progress tracker. + if off % 500 == 0: + progtrack.plan_add_progress(progtrack.PLAN_ACTION_CONFLICT) + + of.close() + self.__actdict = actdict + self.__actdict_timestamp = otimestamp + return actdict + + def _get_stripped_actions_file(self, internal=False): + """Open the actions file described in _create_fast_lookups() and + return the corresponding file object.""" + + sf = open( + os.path.join(self.__action_cache_dir, "actions.stripped"), "r" + ) + sversion = sf.readline().rstrip() + stimestamp = sf.readline().rstrip() + if internal: + sf.close() + return sversion, stimestamp + + return sf + + def _load_conflicting_keys(self): + """Load the list of keys which have conflicting actions in the + existing image. If no such list exists, then return None.""" + + pth = os.path.join(self.__action_cache_dir, "keys.conflicting") + try: + with open(pth, "r") as fh: + version = fh.readline().rstrip() + if version != "VERSION 1": + return None + return set(l.rstrip() for l in fh) + except EnvironmentError as e: + if e.errno == errno.ENOENT: return None + raise + + def gen_installed_actions_bytype(self, atype, implicit_dirs=False): + """Iterates through the installed actions of type 'atype'. 
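A sketch of how the actions.offsets lines parsed above map back into the in-memory lookup dictionary: each line carries the action name, the byte offset into actions.stripped, the number of matching records, and the key attribute value. The example lines are made up.

def load_actdict(lines):
    actdict = {}
    for line in lines:
        actname, offset, cnt, key_attr = line.rstrip().split(None, 3)
        actdict[(actname, key_attr)] = (int(offset), int(cnt))
    return actdict

offsets = [
    "file 18 2 usr/bin/ls\n",
    "link 96 1 usr/bin/vi\n",
]
actdict = load_actdict(offsets)
assert actdict[("file", "usr/bin/ls")] == (18, 2)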
If + 'implicit_dirs' is True and 'atype' is 'dir', then include + directories only implicitly defined by other filesystem + actions.""" + + if implicit_dirs and atype != "dir": + implicit_dirs = False + + excludes = self.list_excludes() + + for pfmri in self.gen_installed_pkgs(): + m = self.get_manifest(pfmri) + dirs = set() + for act in m.gen_actions_by_type(atype, excludes=excludes): + if implicit_dirs: + dirs.add(act.attrs["path"]) + yield act, pfmri + if implicit_dirs: + da = pkg.actions.directory.DirectoryAction + for d in m.get_directories(excludes): + if d not in dirs: + yield da(path=d, implicit="true"), pfmri + + def get_installed_pubs(self): + """Returns a set containing the prefixes of all publishers with + installed packages.""" + + cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + return cat.publishers() + + def strtofmri(self, myfmri): + return pkg.fmri.PkgFmri(myfmri) + + def strtomatchingfmri(self, myfmri): + return pkg.fmri.MatchingPkgFmri(myfmri) + + def get_user_by_name(self, name): + uid = self._usersbyname.get(name, None) + if uid is not None: + return uid + return portable.get_user_by_name(name, self.root, self.type != IMG_USER) + + def get_name_by_uid(self, uid, returnuid=False): + # XXX What to do about IMG_PARTIAL? + try: + return portable.get_name_by_uid( + uid, self.root, self.type != IMG_USER + ) + except KeyError: + if returnuid: + return uid + else: + raise + + def get_group_by_name(self, name): + gid = self._groupsbyname.get(name, None) + if gid is not None: + return gid + return portable.get_group_by_name( + name, self.root, self.type != IMG_USER + ) + + def get_name_by_gid(self, gid, returngid=False): + try: + return portable.get_name_by_gid( + gid, self.root, self.type != IMG_USER + ) + except KeyError: + if returngid: + return gid + else: + raise + + def get_usernames_by_gid(self, gid): + return portable.get_usernames_by_gid(gid, self.root) + + def update_index_dir(self, postfix="index"): + """Since the index directory will not reliably be updated when + the image root is, this should be called prior to using the + index directory. + """ + if self.version == self.CURRENT_VERSION: + self.index_dir = os.path.join(self.imgdir, "cache", postfix) + else: + self.index_dir = os.path.join(self.imgdir, postfix) + + def cleanup_downloads(self): + """Clean up any downloads that were in progress but that + did not successfully finish.""" + + shutil.rmtree(self._incoming_cache_dir, True) + + def cleanup_cached_content( + self, progtrack=None, force=False, verbose=False + ): + """Delete the directory that stores all of our cached + downloaded content. This may take a while for a large + directory hierarchy. 
Don't clean up caches if the + user overrode the underlying setting using PKG_CACHEDIR or + PKG_CACHEROOT.""" + + if not force and not self.cfg.get_policy( + imageconfig.FLUSH_CONTENT_CACHE + ): + return + + cdirs = [] + for path, readonly, pub, layout in self.get_cachedirs(): + if verbose: + print("Checking cache directory {} ({})".format(path, pub)) + if readonly or ( + self.__user_cache_dir and path.startswith(self.__user_cache_dir) + ): + continue + cdirs.append(path) + + if not cdirs: + return + + if not progtrack: + progtrack = progress.NullProgressTracker() + + # 'Updating package cache' + progtrack.job_start(progtrack.JOB_PKG_CACHE, goal=len(cdirs)) + for path in cdirs: + shutil.rmtree(path, True) + progtrack.job_add_progress(progtrack.JOB_PKG_CACHE) + progtrack.job_done(progtrack.JOB_PKG_CACHE) + + def salvage(self, path, full_path=False): + """Called when unexpected file or directory is found during + package operations; returns the path of the salvage + directory where the item was stored. Can be called with + either image-relative or absolute (current) path to file/dir + to be salvaged. If full_path is False (the default), remove + the current mountpoint of the image from the returned + directory path""" + + # This ensures that if the path is already rooted in the image, + # that it will be stored in lost+found (due to os.path.join + # behaviour with absolute path components). + if path.startswith(self.root): + path = path.replace(self.root, "", 1) + + if os.path.isabs(path): + # If for some reason the path wasn't rooted in the + # image, but it is an absolute one, then strip the + # absolute part so that it will be stored in lost+found + # (due to os.path.join behaviour with absolute path + # components). + path = os.path.splitdrive(path)[-1].lstrip(os.path.sep) + + sdir = os.path.normpath( + os.path.join( + self.imgdir, + "lost+found", + path + "-" + time.strftime("%Y%m%dT%H%M%SZ"), + ) + ) + + parent = os.path.dirname(sdir) + if not os.path.exists(parent): + misc.makedirs(parent) + + orig = os.path.normpath(os.path.join(self.root, path)) + + misc.move(orig, sdir) + # remove current mountpoint from sdir + if not full_path: + sdir.replace(self.root, "", 1) + return sdir + + def recover(self, local_spath, full_dest_path, dest_path, old_path): + """Called when recovering directory contents to implement + "salvage-from" directive... full_dest_path must exist. + dest_path is the image-relative location where we salvage to, + old_path is original image-relative directory that delivered + the files we're now recovering. + + When recovering directories where the salvage-from string is + a substring of the previously packaged directory, attempt + to restore as much of the old directory structure as possible + by comparing the salvage-from value with the previously + packaged directory. + + For example, say we had user-content in /var/user/myuser/.ssh, + but have stopped delivering that dir, replacing it with a new + directory /var/.migrate/user which specifies + salvage-from=var/user. + + The intent of the package author, was to have the + /var/.migrate/user directory get the unpackaged 'myuser/.ssh' + directory created as part of the salvaging operation, giving + them /var/.migrate/user/myuser/.ssh + and not to just end up with + /var/.migrate/user/ + """ - def get_manifest_dir(self, pfmri): - """Return path to on-disk manifest cache directory.""" - if not pfmri.publisher: - # Needed for consumers such as search that don't provide - # publisher information. 
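A sketch of the lost+found naming used by salvage() above: the image root is stripped from the salvaged path so os.path.join does not discard the lost+found prefix, and a timestamp suffix keeps repeated salvages of the same path from colliding. The paths are examples only.

import os
import time

def salvage_destination(imgdir, root, path):
    if path.startswith(root):
        path = path.replace(root, "", 1)
    if os.path.isabs(path):
        path = path.lstrip(os.path.sep)
    stamp = time.strftime("%Y%m%dT%H%M%SZ", time.gmtime())
    return os.path.normpath(
        os.path.join(imgdir, "lost+found", path + "-" + stamp)
    )

dest = salvage_destination("/a/var/pkg", "/a", "/a/var/user/myuser")
# -> /a/var/pkg/lost+found/var/user/myuser-<timestamp>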
- pfmri = pfmri.copy() - pfmri.publisher = self.__get_installed_pkg_publisher( - pfmri) - assert pfmri.publisher - if self.version == self.CURRENT_VERSION: - root = self._get_publisher_cache_root(pfmri.publisher) + source_path = os.path.normpath(os.path.join(self.root, local_spath)) + if dest_path != old_path and old_path.startswith( + dest_path + os.path.sep + ): + # this is here so that when salvaging the contents + # of a previously packaged directory, we attempt to + # restore as much of the old directory structure as + # possible. + spath = os.path.relpath(old_path, dest_path) + full_dest_path = os.path.join(full_dest_path, spath) + try: + os.makedirs(full_dest_path) + except OSError as e: + if e.errno != errno.EEXIST: + raise e + + for file_name in os.listdir(source_path): + misc.move(os.path.join(source_path, file_name), full_dest_path) + + def temporary_dir(self): + """Create a temp directory under the image directory for various + purposes. If the process is unable to create a directory in the + image's temporary directory, a replacement location is found.""" + + try: + misc.makedirs(self.__tmpdir) + except (apx.PermissionsException, apx.ReadOnlyFileSystemException): + self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-") + atexit.register(shutil.rmtree, self.__tmpdir, ignore_errors=True) + return self.temporary_dir() + + try: + rval = tempfile.mkdtemp(dir=self.__tmpdir) + + # Force standard mode. + os.chmod(rval, misc.PKG_DIR_MODE) + return rval + except EnvironmentError as e: + if e.errno == errno.EACCES or e.errno == errno.EROFS: + self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-") + atexit.register( + shutil.rmtree, self.__tmpdir, ignore_errors=True + ) + return self.temporary_dir() + raise apx._convert_error(e) + + def temporary_file(self, close=True): + """Create a temporary file under the image directory for various + purposes. If 'close' is True, close the file descriptor; + otherwise leave it open. If the process is unable to create a + file in the image's temporary directory, a replacement is + found.""" + + try: + misc.makedirs(self.__tmpdir) + except (apx.PermissionsException, apx.ReadOnlyFileSystemException): + self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-") + atexit.register(shutil.rmtree, self.__tmpdir, ignore_errors=True) + return self.temporary_file(close=close) + + try: + fd, name = tempfile.mkstemp(dir=self.__tmpdir) + if close: + os.close(fd) + except EnvironmentError as e: + if e.errno == errno.EACCES or e.errno == errno.EROFS: + self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-") + atexit.register( + shutil.rmtree, self.__tmpdir, ignore_errors=True + ) + return self.temporary_file(close=close) + raise apx._convert_error(e) + + if close: + return name + else: + return fd, name + + def __filter_install_matches(self, matches): + """Attempts to eliminate redundant matches found during + packaging operations: + + * First, stems of installed packages for publishers that + are now unknown (no longer present in the image + configuration) are dropped. + + * Second, if multiple matches are still present, stems of + of installed packages, that are not presently in the + corresponding publisher's catalog, are dropped. + + * Finally, if multiple matches are still present, all + stems except for those in state PKG_STATE_INSTALLED are + dropped. 
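A sketch of the fallback used by temporary_dir()/temporary_file() above: when the image's own tmp directory cannot be used (read-only image, insufficient permissions), a private directory is created elsewhere, registered for removal at exit, and the request is retried against the new location.

import atexit
import errno
import os
import shutil
import tempfile

def temporary_file(tmpdir, close=True):
    try:
        os.makedirs(tmpdir, exist_ok=True)
        fd, name = tempfile.mkstemp(dir=tmpdir)
    except OSError as e:
        if e.errno in (errno.EACCES, errno.EROFS):
            fallback = tempfile.mkdtemp(prefix="pkg5tmp-")
            atexit.register(shutil.rmtree, fallback, ignore_errors=True)
            return temporary_file(fallback, close=close)
        raise
    if close:
        os.close(fd)
        return name
    return fd, name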
+ + Returns a list of the filtered matches, along with a dict of + their unique names.""" + + olist = [] + onames = set() + + # First eliminate any duplicate matches that are for unknown + # publishers (publishers which have been removed from the image + # configuration). + publist = set(p.prefix for p in self.get_publishers().values()) + for m, st in matches: + if m.publisher in publist: + onames.add(m.get_pkg_stem()) + olist.append((m, st)) + + # Next, if there are still multiple matches, eliminate matches + # belonging to publishers that no longer have the FMRI in their + # catalog. + found_state = False + if len(onames) > 1: + mlist = [] + mnames = set() + for m, st in olist: + if not st["in_catalog"]: + continue + if st["state"] == pkgdefs.PKG_STATE_INSTALLED: + found_state = True + mnames.add(m.get_pkg_stem()) + mlist.append((m, st)) + olist = mlist + onames = mnames + + # Finally, if there are still multiple matches, and a known + # stem is installed, then eliminate any stems that do not + # have an installed version. + if found_state and len(onames) > 1: + mlist = [] + mnames = set() + for m, st in olist: + if st["state"] == pkgdefs.PKG_STATE_INSTALLED: + mnames.add(m.get_pkg_stem()) + mlist.append((m, st)) + olist = mlist + onames = mnames + + return olist, onames + + def flag_pkgs(self, pfmris, state, value, progtrack): + """Sets/unsets a state flag for packages installed in + the image.""" + + if self.version < self.CURRENT_VERSION: + raise apx.ImageFormatUpdateNeeded(self.root) + + # Only the 'manual' flag can be set or unset via this + # method. + if state not in [pkgdefs.PKG_STATE_MANUAL]: + raise apx.ImagePkgStateError(pfmri, state) + + if not progtrack: + progtrack = progress.NullProgressTracker() + + with self.locked_op("state"): + icat = self.get_catalog(self.IMG_CATALOG_INSTALLED) + kcat = self.get_catalog(self.IMG_CATALOG_KNOWN) + + progtrack.job_start(progtrack.JOB_STATE_DB) + + changed = set() + + for pfmri in pfmris: + entry = kcat.get_entry(pfmri) + mdata = entry.get("metadata", {}) + states = set(mdata.get("states", set())) + + if pkgdefs.PKG_STATE_INSTALLED not in states: + raise apx.ImagePkgStateError(pfmri, states) + + progtrack.job_add_progress(progtrack.JOB_STATE_DB) + + if value and state not in states: + states.add(state) + changed.add(pfmri) + elif not value and state in states: + states.discard(state) + changed.add(pfmri) else: - root = self.imgdir - return os.path.join(root, "pkg", pfmri.get_dir_path()) - - def get_manifest_path(self, pfmri): - """Return path to on-disk manifest file.""" - if not pfmri.publisher: - # Needed for consumers such as search that don't provide - # publisher information. - pfmri = pfmri.copy() - pfmri.publisher = self.__get_installed_pkg_publisher( - pfmri) - assert pfmri.publisher - if self.version == self.CURRENT_VERSION: - root = os.path.join(self._get_publisher_meta_root( - pfmri.publisher)) - return os.path.join(root, "pkg", pfmri.get_dir_path()) - return os.path.join(self.get_manifest_dir(pfmri), - "manifest") - - def __get_manifest(self, fmri, excludes=EmptyI, intent=None, - alt_pub=None): - """Find on-disk manifest and create in-memory Manifest - object.... 
grab from server if needed""" + continue - try: - if not self.has_manifest(fmri, alt_pub=alt_pub): - raise KeyError - ret = manifest.FactoredManifest(fmri, - self.get_manifest_dir(fmri), - excludes=excludes, - pathname=self.get_manifest_path(fmri)) - - # if we have a intent string, let depot - # know for what we're using the cached manifest - if intent: - alt_repo = None - if alt_pub: - alt_repo = alt_pub.repository - try: - self.transport.touch_manifest(fmri, - intent, alt_repo=alt_repo) - except (apx.UnknownPublisher, - apx.TransportError): - # It's not fatal if we can't find - # or reach the publisher. - pass - except KeyError: - ret = self.transport.get_manifest(fmri, excludes, - intent, pub=alt_pub) - return ret - - def get_manifest(self, fmri, ignore_excludes=False, intent=None, - alt_pub=None): - """return manifest; uses cached version if available. - ignore_excludes controls whether manifest contains actions - for all variants - - If 'ignore_excludes' is set to True, then all actions in the - manifest are included, regardless of variant or facet tags. If - set to False, then the variants and facets currently set in the - image will be applied, potentially filtering out some of the - actions.""" - - # Normally elide other arch variants, facets - - if ignore_excludes: - excludes = EmptyI - else: - excludes = [self.cfg.variants.allow_action, - self.cfg.facets.allow_action] + # Catalog format only supports lists. + mdata["states"] = list(states) - try: - m = self.__get_manifest(fmri, excludes=excludes, - intent=intent, alt_pub=alt_pub) - except apx.ActionExecutionError as e: - raise - except pkg.actions.ActionError as e: - raise apx.InvalidPackageErrors([e]) + # Now record the package state. + kcat.update_entry(mdata, pfmri=pfmri) + icat.update_entry(mdata, pfmri=pfmri) - return m - - def __catalog_save(self, cats, pfmris, progtrack): - - # Temporarily redirect the catalogs to a different location, - # so that if the save is interrupted, the image won't be left - # with invalid state, and then save them. - tmp_state_root = self.temporary_dir() - - try: - for cat, name in cats: - cpath = os.path.join(tmp_state_root, name) - - # Must copy the old catalog data to the new - # destination as only changed files will be - # written. - progtrack.job_add_progress( - progtrack.JOB_IMAGE_STATE) - misc.copytree(cat.meta_root, cpath) - progtrack.job_add_progress( - progtrack.JOB_IMAGE_STATE) - cat.meta_root = cpath - cat.finalize(pfmris=pfmris) - progtrack.job_add_progress( - progtrack.JOB_IMAGE_STATE) - cat.save() - progtrack.job_add_progress( - progtrack.JOB_IMAGE_STATE) - - del cat, name - self.__init_catalogs() - progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) - - # copy any other state files from current state - # dir into new state dir. - for p in os.listdir(self._statedir): - progtrack.job_add_progress( - progtrack.JOB_IMAGE_STATE) - fp = os.path.join(self._statedir, p) - if os.path.isfile(fp): - portable.copyfile(fp, - os.path.join(tmp_state_root, p)) - - # Next, preserve the old installed state dir, rename the - # new one into place, and then remove the old one. - orig_state_root = self.salvage(self._statedir, - full_path=True) - portable.rename(tmp_state_root, self._statedir) - - progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) - shutil.rmtree(orig_state_root, True) - - progtrack.job_add_progress(progtrack.JOB_IMAGE_STATE) - except EnvironmentError as e: - # shutil.Error can contains a tuple of lists of errors. 
- # Some of the error entries may be a tuple others will - # be a string due to poor error handling in shutil. - if isinstance(e, shutil.Error) and \ - type(e.args[0]) == list: - msg = "" - for elist in e.args: - for entry in elist: - if type(entry) == tuple: - msg += "{0}\n".format( - entry[-1]) - else: - msg += "{0}\n".format( - entry) - raise apx.UnknownErrors(msg) - raise apx._convert_error(e) - finally: - # Regardless of success, the following must happen. - self.__init_catalogs() - if os.path.exists(tmp_state_root): - shutil.rmtree(tmp_state_root, True) - - def update_pkg_installed_state(self, pkg_pairs, progtrack, origin): - """Sets the recorded installed state of each package pair in - 'pkg_pairs'. 'pkg_pair' should be an iterable of tuples of - the format (added, removed) where 'removed' is the FMRI of the - package that was uninstalled, and 'added' is the package - installed for the operation. These pairs are representative of - the destination and origin package for each part of the - operation.""" - - if self.version < self.CURRENT_VERSION: - raise apx.ImageFormatUpdateNeeded(self.root) - - kcat = self.get_catalog(self.IMG_CATALOG_KNOWN) - icat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - - added = set() - removed = set() - manual = set() - updated = {} - for add_pkg, rem_pkg in pkg_pairs: - if add_pkg == rem_pkg: - continue - if add_pkg: - added.add(add_pkg) - if add_pkg in origin: - manual.add(add_pkg) - if rem_pkg: - removed.add(rem_pkg) - if add_pkg and rem_pkg: - updated[add_pkg] = \ - dict(kcat.get_entry(rem_pkg).get( - "metadata", {})) - - combo = added.union(removed) - - # If PKG_AUTOINSTALL is set in the environment, don't mark - # the installed packages as 'manually installed'. This is - # used by Kayak when creating the initial ZFS send stream. - if "PKG_AUTOINSTALL" in os.environ: - manual = set() - - progtrack.job_start(progtrack.JOB_STATE_DB) - # 'Updating package state database' - for pfmri in combo: - progtrack.job_add_progress(progtrack.JOB_STATE_DB) - entry = kcat.get_entry(pfmri) - mdata = entry.get("metadata", {}) - states = set(mdata.get("states", set())) - if pfmri in removed: - icat.remove_package(pfmri) - states.discard(pkgdefs.PKG_STATE_INSTALLED) - states.discard(pkgdefs.PKG_STATE_MANUAL) - mdata.pop("last-install", None) - mdata.pop("last-update", None) - - if pfmri in added: - states.add(pkgdefs.PKG_STATE_INSTALLED) - if pfmri in manual: - states.add(pkgdefs.PKG_STATE_MANUAL) - cur_time = pkg.catalog.now_to_basic_ts() - if pfmri in updated: - last_install = updated[pfmri].get( - "last-install") - if last_install: - mdata["last-install"] = \ - last_install - mdata["last-update"] = \ - cur_time - else: - mdata["last-install"] = \ - cur_time - ostates = set(updated[pfmri] - .get("states", set())) - if pkgdefs.PKG_STATE_MANUAL in ostates: - states.add( - pkgdefs.PKG_STATE_MANUAL) - else: - mdata["last-install"] = cur_time - if pkgdefs.PKG_STATE_ALT_SOURCE in states: - states.discard( - pkgdefs.PKG_STATE_UPGRADABLE) - states.discard( - pkgdefs.PKG_STATE_ALT_SOURCE) - states.discard( - pkgdefs.PKG_STATE_KNOWN) - elif pkgdefs.PKG_STATE_KNOWN not in states: - # This entry is no longer available and has no - # meaningful state information, so should be - # discarded. 
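Roughly, the last-install/last-update bookkeeping in update_pkg_installed_state() behaves like this sketch; plain dicts and string state names stand in for the catalog metadata and pkgdefs constants:

cur_time = "20250101T120000Z"              # stands in for pkg.catalog.now_to_basic_ts()
old_mdata = {"states": ["installed", "manual"],
             "last-install": "20240101T000000Z"}
new_mdata = {"states": ["installed"]}

last_install = old_mdata.get("last-install")
if last_install:
    new_mdata["last-install"] = last_install   # preserved across the upgrade
    new_mdata["last-update"] = cur_time        # stamped with the current operation
else:
    new_mdata["last-install"] = cur_time
if "manual" in old_mdata.get("states", []):
    new_mdata["states"].append("manual")       # manual flag carries over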
- kcat.remove_package(pfmri) - progtrack.job_add_progress( - progtrack.JOB_STATE_DB) - continue - - if (pkgdefs.PKG_STATE_INSTALLED in states and - pkgdefs.PKG_STATE_UNINSTALLED in states) or ( - pkgdefs.PKG_STATE_KNOWN in states and - pkgdefs.PKG_STATE_UNKNOWN in states): - raise apx.ImagePkgStateError(pfmri, - states) - - # Catalog format only supports lists. - mdata["states"] = list(states) - - # Now record the package state. - kcat.update_entry(mdata, pfmri=pfmri) - - # If the package is being marked as installed, - # then it shouldn't already exist in the - # installed catalog and should be added. - if pfmri in added: - icat.append(kcat, pfmri=pfmri) - - entry = mdata = states = None - progtrack.job_add_progress(progtrack.JOB_STATE_DB) - progtrack.job_done(progtrack.JOB_STATE_DB) - - # Discard entries for alternate source packages that weren't - # installed as part of the operation. - if self.__alt_pkg_pub_map: - for pfmri in self.__alt_known_cat.fmris(): - if pfmri in added: - # Nothing to do. - continue - - entry = kcat.get_entry(pfmri) - if not entry: - # The only reason that the entry should - # not exist in the 'known' part is - # because it was removed during the - # operation. - assert pfmri in removed - continue - - states = entry.get("metadata", {}).get("states", - EmptyI) - if pkgdefs.PKG_STATE_ALT_SOURCE in states: - kcat.remove_package(pfmri) - - # Now add the publishers of packages that were installed - # from temporary sources that did not previously exist - # to the image's configuration. (But without any - # origins, sticky, and enabled.) - cfgpubs = set(self.cfg.publishers.keys()) - instpubs = set(f.publisher for f in added) - altpubs = self.__alt_known_cat.publishers() - - # List of publishers that need to be added is the - # intersection of installed and alternate minus - # the already configured. - newpubs = (instpubs & altpubs) - cfgpubs - # Sort the set to get a deterministic output. - for pfx in sorted(newpubs): - npub = publisher.Publisher(pfx, - repository=publisher.Repository()) - self.__add_publisher(npub, - refresh_allowed=False) - - # Ensure image configuration reflects new information. - self.__cleanup_alt_pkg_certs() - self.save_config() - - # Remove manifests of packages that were removed from the - # system. Some packages may have only had facets or - # variants changed, so don't remove those. - - # 'Updating package cache' - progtrack.job_start(progtrack.JOB_PKG_CACHE, goal=len(removed)) - for pfmri in removed: - mcdir = self.get_manifest_dir(pfmri) - manifest.FactoredManifest.clear_cache(mcdir) - - # Remove package cache directory if possible; we don't - # care if it fails. - try: - os.rmdir(os.path.dirname(mcdir)) - except: - pass - - mpath = self.get_manifest_path(pfmri) - try: - portable.remove(mpath) - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise apx._convert_error(e) - - # Remove package manifest directory if possible; we - # don't care if it fails. 
- try: - os.rmdir(os.path.dirname(mpath)) - except: - pass - progtrack.job_add_progress(progtrack.JOB_PKG_CACHE) - progtrack.job_done(progtrack.JOB_PKG_CACHE) + progtrack.job_done(progtrack.JOB_STATE_DB) + if len(changed): progtrack.job_start(progtrack.JOB_IMAGE_STATE) - self.__catalog_save( - [(kcat, self.IMG_CATALOG_KNOWN), - (icat, self.IMG_CATALOG_INSTALLED)], - added, progtrack) - + [ + (kcat, self.IMG_CATALOG_KNOWN), + (icat, self.IMG_CATALOG_INSTALLED), + ], + changed, + progtrack, + ) progtrack.job_done(progtrack.JOB_IMAGE_STATE) - def get_catalog(self, name): - """Returns the requested image catalog. - - 'name' must be one of the following image constants: - IMG_CATALOG_KNOWN - The known catalog contains all of packages that are - installed or available from a publisher's repository. - - IMG_CATALOG_INSTALLED - The installed catalog is a subset of the 'known' - catalog that only contains installed packages.""" - - if not self.imgdir: - raise RuntimeError("self.imgdir must be set") - - cat = self.__catalogs.get(name) - if not cat: - cat = self.__get_catalog(name) - self.__catalogs[name] = cat - - if name == self.IMG_CATALOG_KNOWN: - # Apply alternate package source data every time that - # the known catalog is requested. - self.__apply_alt_pkg_sources(cat) - - return cat - - def _manifest_cb(self, cat, f): - # Only allow lazy-load for packages from non-v1 sources. - # Assume entries for other sources have all data - # required in catalog. This prevents manifest retrieval - # for packages that don't have any related action data - # in the catalog because they don't have any related - # action data in their manifest. - entry = cat.get_entry(f) - states = entry["metadata"]["states"] - if pkgdefs.PKG_STATE_V1 not in states: - return self.get_manifest(f, ignore_excludes=True) - return - - def __get_catalog(self, name): - """Private method to retrieve catalog; this bypasses the - normal automatic caching (unless the image hasn't been - upgraded yet).""" - - if self.__upgraded and self.version < 3: - # Assume the catalog is already cached in this case - # and can't be reloaded from disk as it doesn't exist - # on disk yet. - return self.__catalogs[name] - - croot = os.path.join(self._statedir, name) - try: - os.makedirs(croot) - except EnvironmentError as e: - if e.errno in (errno.EACCES, errno.EROFS): - # Allow operations to work for - # unprivileged users. - croot = None - elif e.errno != errno.EEXIST: - raise - - # batch_mode is set to True here as any operations that modify - # the catalogs (add or remove entries) are only done during an - # image upgrade or metadata refresh. In both cases, the catalog - # is resorted and finalized so this is always safe to use. 
- cat = pkg.catalog.Catalog(batch_mode=True, - manifest_cb=self._manifest_cb, meta_root=croot, sign=False) - return cat - - def __remove_catalogs(self): - """Removes all image catalogs and their directories.""" - - self.__init_catalogs() - for name in (self.IMG_CATALOG_KNOWN, - self.IMG_CATALOG_INSTALLED): - shutil.rmtree(os.path.join(self._statedir, name)) - - def get_version_installed(self, pfmri): - """Returns an fmri of the installed package matching the - package stem of the given fmri or None if no match is found.""" - - cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - for ver, fmris in cat.fmris_by_version(pfmri.pkg_name): - return fmris[0] - return None - - def get_pkg_repo(self, pfmri): - """Returns the repository object containing the origins that - should be used to retrieve the specified package or None if - it can be retrieved from all sources or is not a known package. - """ - - assert pfmri.publisher - cat = self.get_catalog(self.IMG_CATALOG_KNOWN) - entry = cat.get_entry(pfmri) - if entry is None: - # Package not known. - return - - try: - slist = entry["metadata"]["sources"] - except KeyError: - # Can be retrieved from any source. - return - else: - if not slist: - # Can be retrieved from any source. - return - - pub = self.get_publisher(prefix=pfmri.publisher) - repo = copy.copy(pub.repository) - norigins = [ - o for o in repo.origins - if o.uri in slist - ] - - if not norigins: - # Known sources don't match configured; return so that - # caller can fallback to default behaviour. - return - - repo.origins = norigins - return repo - - def get_pkg_state(self, pfmri): - """Returns the list of states a package is in for this image.""" - - cat = self.get_catalog(self.IMG_CATALOG_KNOWN) - entry = cat.get_entry(pfmri) - if entry is None: - return [] - return entry["metadata"]["states"] - - def is_pkg_installed(self, pfmri): - """Returns a boolean value indicating whether the specified - package is installed.""" - - # Avoid loading the installed catalog if the known catalog - # is already loaded. This is safe since the installed - # catalog is a subset of the known, and a specific entry - # is being retrieved. 
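For illustration, the origin narrowing performed by get_pkg_repo() above amounts to the following; the URIs are placeholders:

slist = ["http://pkg.example.com/primary/"]          # sources recorded for the package
origins = ["http://pkg.example.com/primary/",
           "http://mirror.example.com/secondary/"]   # publisher's configured origins
norigins = [o for o in origins if o in slist]
# A non-empty result restricts retrieval to the recorded sources;
# an empty result means the caller falls back to the default behaviour.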
- if not self.__catalog_loaded(self.IMG_CATALOG_KNOWN): - cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - else: - cat = self.get_catalog(self.IMG_CATALOG_KNOWN) - - entry = cat.get_entry(pfmri) - if entry is None: - return False - states = entry["metadata"]["states"] - return pkgdefs.PKG_STATE_INSTALLED in states - - def list_excludes(self, new_variants=None, new_facets=None): - """Generate a list of callables that each return True if an - action is to be included in the image using the currently - defined variants & facets for the image, or an updated set if - new_variants or new_facets are specified.""" - - if new_variants: - new_vars = self.cfg.variants.copy() - new_vars.update(new_variants) - var_call = new_vars.allow_action - else: - var_call = self.cfg.variants.allow_action - if new_facets is not None: - fac_call = new_facets.allow_action - else: - fac_call = self.cfg.facets.allow_action - - return [var_call, fac_call] - - def get_variants(self): - """ return a copy of the current image variants""" - return self.cfg.variants.copy() - - def get_facets(self): - """ Return a copy of the current image facets""" - return self.cfg.facets.copy() - - def __state_updating_pathname(self): - """Return the path to a flag file indicating that the image - catalog is being updated.""" - return os.path.join(self._statedir, self.__STATE_UPDATING_FILE) - - def __start_state_update(self): - """Called when we start updating the image catalog. Normally - returns False, but will return True if a previous update was - interrupted.""" - - # get the path to the image catalog update flag file - pathname = self.__state_updating_pathname() - - # if the flag file exists a previous update was interrupted so - # return 1 - if os.path.exists(pathname): - return True - - # create the flag file and return 0 - file_mode = misc.PKG_FILE_MODE - try: - with open(pathname, "w"): - os.chmod(pathname, file_mode) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise apx.PermissionsException(e.filename) - if e.errno == errno.EROFS: - raise apx.ReadOnlyFileSystemException( - e.filename) - raise - return False - - def __end_state_update(self): - """Called when we're done updating the image catalog.""" - - # get the path to the image catalog update flag file - pathname = self.__state_updating_pathname() - - # delete the flag file. - try: - portable.remove(pathname) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise apx.PermissionsException(e.filename) - if e.errno == errno.EROFS: - raise apx.ReadOnlyFileSystemException( - e.filename) - raise - - def __rebuild_image_catalogs(self, progtrack=None): - """Rebuilds the image catalogs based on the available publisher - catalogs.""" - - if self.version < 3: - raise apx.ImageFormatUpdateNeeded(self.root) - - if not progtrack: - progtrack = progress.NullProgressTracker() - - progtrack.cache_catalogs_start() - - publist = list(self.gen_publishers()) - - be_name, be_uuid = bootenv.BootEnv.get_be_name(self.root) - self.history.log_operation_start("rebuild-image-catalogs", - be_name=be_name, be_uuid=be_uuid) - - # Mark all operations as occurring at this time. - op_time = datetime.datetime.utcnow() - - # The image catalogs need to be updated, but this is a bit - # tricky as previously known packages must remain known even - # if PKG_STATE_KNOWN is no longer true if any other state - # information is present. This is to allow freezing, etc. 
of - # package states on a permanent basis even if the package is - # no longer available from a publisher repository. However, - # this is only True of installed packages. - old_icat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - - # batch_mode is set to True here since without it, catalog - # population time is almost doubled (since the catalog is - # re-sorted and stats are generated for every operation). - # In addition, the new catalog is first created in a new - # temporary directory so that it can be moved into place - # at the very end of this process (to minimize the chance - # that failure or interruption will cause the image to be - # left in an inconsistent state). - tmp_state_root = self.temporary_dir() - - # Copy any regular files placed in the state directory - for p in os.listdir(self._statedir): - if p == self.__STATE_UPDATING_FILE: - # don't copy the state updating file - continue - fp = os.path.join(self._statedir, p) - if os.path.isfile(fp): - portable.copyfile(fp, os.path.join(tmp_state_root, p)) - - kcat = pkg.catalog.Catalog(batch_mode=True, - meta_root=os.path.join(tmp_state_root, - self.IMG_CATALOG_KNOWN), sign=False) - - # XXX if any of the below fails for any reason, the old 'known' - # catalog needs to be re-loaded so the client is in a consistent - # state. - - # All enabled publisher catalogs must be processed. - pub_cats = [(pub.prefix, pub.catalog) for pub in publist] - - # XXX For backwards compatibility, 'upgradability' of packages - # is calculated and stored based on whether a given pkg stem - # matches the newest version in the catalog. This is quite - # expensive (due to overhead), but at least the cost is - # consolidated here. This comparison is also cross-publisher, - # as it used to be. In the future, it could likely be improved - # by usage of the SAT solver. - newest = {} - for pfx, cat in [(None, old_icat)] + pub_cats: - for f in cat.fmris(last=True, - pubs=pfx and [pfx] or EmptyI): - nver, snver = newest.get(f.pkg_name, (None, - None)) - if f.version > nver: - newest[f.pkg_name] = (f.version, - str(f.version)) - - # Next, copy all of the entries for the catalog parts that - # currently exist into the image 'known' catalog. - - # Iterator for source parts. - sparts = ( - (pfx, cat, name, cat.get_part(name, must_exist=True)) - for pfx, cat in pub_cats - for name in cat.parts + print( + "{} package{} updated.".format( + len(changed), "s" if len(changed) != 1 else "" ) + ) + + def avoid_pkgs(self, pat_list, progtrack, check_cancel): + """Avoid the specified packages... use pattern matching on + names; ignore versions.""" + + with self.locked_op("avoid"): + ip = imageplan.ImagePlan + self._avoid_set_save( + self.avoid_set_get() + | set(ip.match_user_stems(self, pat_list, ip.MATCH_UNINSTALLED)) + ) + + def unavoid_pkgs(self, pat_list, progtrack, check_cancel): + """Unavoid the specified packages... use pattern matching on + names; ignore versions.""" + + with self.locked_op("unavoid"): + ip = imageplan.ImagePlan + unavoid_set = set(ip.match_user_stems(self, pat_list, ip.MATCH_ALL)) + current_set = self.avoid_set_get() + not_avoided = unavoid_set - current_set + if not_avoided: + raise apx.PlanCreationException(not_avoided=not_avoided) + + # Don't allow unavoid if removal of the package from the + # avoid list would require the package to be installed + # as this would invalidate current image state. If the + # package is already installed though, it doesn't really + # matter if it's a target of an avoid or not. 
+ installed_set = set([f.pkg_name for f in self.gen_installed_pkgs()]) + + would_install = [ + a + for f, a in self.gen_tracked_stems() + if a in unavoid_set and a not in installed_set + ] + + if would_install: + raise apx.PlanCreationException(would_install=would_install) + + self._avoid_set_save(current_set - unavoid_set) + + def get_avoid_dict(self): + """return dict of lists (avoided stem, pkgs w/ group + dependencies on this pkg)""" + ret = dict((a, list()) for a in self.avoid_set_get()) + for fmri, group in self.gen_tracked_stems(): + if group in ret: + ret[group].append(fmri.pkg_name) + return ret + + def freeze_pkgs(self, pat_list, progtrack, check_cancel, dry_run, comment): + """Freeze the specified packages... use pattern matching on + names. + + The 'pat_list' parameter contains the list of patterns of + packages to freeze. + + The 'progtrack' parameter contains the progress tracker for this + operation. + + The 'check_cancel' parameter contains a function to call to + check if the operation has been canceled. + + The 'dry_run' parameter controls whether packages are actually + frozen. + + The 'comment' parameter contains the comment, if any, which will + be associated with the packages that are frozen. + """ - # Build list of installed packages based on actual state - # information just in case there is a state issue from an - # older client. - # Also stash away any old metadata, in particular we want - # the last-install and last-update times but maybe other - # metadata will be useful in the future. - inst_stems = {} - for t, entry in old_icat.tuple_entries(): - states = entry["metadata"]["states"] - if pkgdefs.PKG_STATE_INSTALLED not in states: - continue - pub, stem, ver = t - inst_stems.setdefault(pub, {}) - inst_stems[pub].setdefault(stem, {}) - inst_stems[pub][stem][ver] = { "installed": False, - "metadata" : entry["metadata"] } - - # Create the new installed catalog in a temporary location. - icat = pkg.catalog.Catalog(batch_mode=True, - meta_root=os.path.join(tmp_state_root, - self.IMG_CATALOG_INSTALLED), sign=False) - - excludes = self.list_excludes() - - frozen_pkgs = dict([ - (p[0].pkg_name, p[0]) for p in self.get_frozen_list() - ]) - for pfx, cat, name, spart in sparts: - # 'spart' is the source part. - if spart is None: - # Client hasn't retrieved this part. - continue - - # New known part. - nkpart = kcat.get_part(name) - nipart = icat.get_part(name) - base = name.startswith("catalog.base.") - - # Avoid accessor overhead since these will be - # used for every entry. - cat_ver = cat.version - dp = cat.get_part("catalog.dependency.C", - must_exist=True) - - for t, sentry in spart.tuple_entries(pubs=[pfx]): - pub, stem, ver = t - - installed = False - if pub in inst_stems and \ - stem in inst_stems[pub] and \ - ver in inst_stems[pub][stem]: - installed = True - inst_stems[pub][stem][ver]["installed"] = True - - # copy() is too slow here and catalog entries - # are shallow so this should be sufficient. - entry = dict(six.iteritems(sentry)) - if not base: - # Nothing else to do except add the - # entry for non-base catalog parts. - nkpart.add(metadata=entry, - op_time=op_time, pub=pub, stem=stem, - ver=ver) - if installed: - nipart.add(metadata=entry, - op_time=op_time, pub=pub, - stem=stem, ver=ver) - continue - - # Only the base catalog part stores package - # state information and/or other metadata. 
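The shape returned by get_avoid_dict() above can be pictured with made-up package names:

avoid_set = {"editor/vim", "web/browser/firefox"}
# gen_tracked_stems() yields (group package, depended-on stem) pairs
tracked = [("group/feature/desktop", "web/browser/firefox")]

ret = {a: [] for a in avoid_set}
for group_pkg, stem in tracked:
    if stem in ret:
        ret[stem].append(group_pkg)
# ret == {"editor/vim": [],
#         "web/browser/firefox": ["group/feature/desktop"]}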
- mdata = entry.setdefault("metadata", {}) - states = mdata.setdefault("states", []) - states.append(pkgdefs.PKG_STATE_KNOWN) - - if cat_ver == 0: - states.append(pkgdefs.PKG_STATE_V0) - elif pkgdefs.PKG_STATE_V0 not in states: - # Assume V1 catalog source. - states.append(pkgdefs.PKG_STATE_V1) - - if installed: - states.append( - pkgdefs.PKG_STATE_INSTALLED) - # Preserve the dates of install/update - # if present in the old metadata - md = inst_stems[pub][stem][ver]["metadata"] - for key in ["last-install", "last-update"]: - if key in md: - entry["metadata"][key] = md[key] - # Preserve the manual installation - # flag, if present in the old metadata - ostates = set(md.get("states", set())) - if pkgdefs.PKG_STATE_MANUAL in ostates: - states.append( - pkgdefs.PKG_STATE_MANUAL) - - nver, snver = newest.get(stem, (None, None)) - if snver is not None and ver != snver: - states.append( - pkgdefs.PKG_STATE_UPGRADABLE) - - # Check if the package is frozen. - if stem in frozen_pkgs: - f_ver = frozen_pkgs[stem].version - if f_ver == ver or \ - pkg.version.Version(ver - ).is_successor(f_ver, - constraint= - pkg.version.CONSTRAINT_AUTO): - states.append( - pkgdefs.PKG_STATE_FROZEN) - - # Determine if package is obsolete or has been - # renamed and mark with appropriate state. - dpent = None - if dp is not None: - dpent = dp.get_entry(pub=pub, stem=stem, - ver=ver) - if dpent is not None: - for a in dpent["actions"]: - # Constructing action objects - # for every action would be a - # lot slower, so a simple string - # match is done first so that - # only interesting actions get - # constructed. - if not a.startswith("set"): - continue - if not ("pkg.obsolete" in a or \ - "pkg.renamed" in a or \ - "pkg.legacy" in a): - continue - - try: - act = pkg.actions.fromstr(a) - except pkg.actions.ActionError: - # If the action can't be - # parsed or is not yet - # supported, continue. - continue - - if act.attrs["value"].lower() != "true": - continue - - if act.attrs["name"] == "pkg.obsolete": - states.append( - pkgdefs.PKG_STATE_OBSOLETE) - elif act.attrs["name"] == "pkg.renamed": - if not act.include_this( - excludes, publisher=pub): - continue - states.append( - pkgdefs.PKG_STATE_RENAMED) - elif act.attrs["name"] == "pkg.legacy": - states.append( - pkgdefs.PKG_STATE_LEGACY) - - mdata["states"] = states - - # Add base entries. - nkpart.add(metadata=entry, op_time=op_time, - pub=pub, stem=stem, ver=ver) - if installed: - nipart.add(metadata=entry, - op_time=op_time, pub=pub, stem=stem, - ver=ver) - - # Now add installed packages to list of known packages using - # previous state information. While doing so, track any - # new entries as the versions for the stem of the entry will - # need to be passed to finalize() for sorting. - final_fmris = [] - for name in old_icat.parts: - # Old installed part. - ipart = old_icat.get_part(name, must_exist=True) - - # New known part. - nkpart = kcat.get_part(name) - - # New installed part. - nipart = icat.get_part(name) - - base = name.startswith("catalog.base.") - - mdata = None - for t, entry in ipart.tuple_entries(): - pub, stem, ver = t - - if pub not in inst_stems or \ - stem not in inst_stems[pub] or \ - ver not in inst_stems[pub][stem] or \ - inst_stems[pub][stem][ver]["installed"]: - # Entry is no longer valid or is already - # known. 
- continue - - if base: - mdata = entry["metadata"] - states = set(mdata["states"]) - states.discard(pkgdefs.PKG_STATE_KNOWN) - - nver, snver = newest.get(stem, (None, - None)) - if not nver or \ - (snver is not None and ver == snver): - states.discard( - pkgdefs.PKG_STATE_UPGRADABLE) - elif snver is not None: - states.add( - pkgdefs.PKG_STATE_UPGRADABLE) - # Check if the package is frozen. - if stem in frozen_pkgs: - f_ver = frozen_pkgs[stem].version - if f_ver == ver or \ - pkg.version.Version(ver - ).is_successor(f_ver, - constraint= - pkg.version.CONSTRAINT_AUTO): - states.add( - pkgdefs.PKG_STATE_FROZEN) - else: - states.discard( - pkgdefs.PKG_STATE_FROZEN) - - mdata["states"] = list(states) - - # Add entries. - nkpart.add(metadata=entry, op_time=op_time, - pub=pub, stem=stem, ver=ver) - nipart.add(metadata=entry, op_time=op_time, - pub=pub, stem=stem, ver=ver) - final_fmris.append(pkg.fmri.PkgFmri(name=stem, - publisher=pub, version=ver)) - - # Save the new catalogs. - for cat in kcat, icat: - misc.makedirs(cat.meta_root) - cat.finalize(pfmris=final_fmris) - cat.save() - - # Next, preserve the old installed state dir, rename the - # new one into place, and then remove the old one. - orig_state_root = self.salvage(self._statedir, full_path=True) - portable.rename(tmp_state_root, self._statedir) - shutil.rmtree(orig_state_root, True) - - # Ensure in-memory catalogs get reloaded. - self.__init_catalogs() - - self.update_last_modified() - progtrack.cache_catalogs_done() - self.history.log_operation_end() - - def refresh_publishers(self, full_refresh=False, immediate=False, - pubs=None, progtrack=None, ignore_unreachable=True): - """Refreshes the metadata (e.g. catalog) for one or more - publishers. Callers are responsible for locking the image. - - 'full_refresh' is an optional boolean value indicating whether - a full retrieval of publisher metadata (e.g. catalogs) or only - an update to the existing metadata should be performed. When - True, 'immediate' is also set to True. - - 'immediate' is an optional boolean value indicating whether the - a refresh should occur now. If False, a publisher's selected - repository will only be checked for updates if the update - interval period recorded in the image configuration has been - exceeded. - - 'pubs' is a list of publisher prefixes or publisher objects - to refresh. Passing an empty list or using the default value - implies all publishers. - - 'ignore_unreachable' is an optional boolean value indicating - whether unreachable repositories should be ignored. If True, - errors contacting this repository are stored in the transport - but no exception is raised, allowing an operation to continue - if an unneeded repository is not online.""" - - if self.version < 3: - raise apx.ImageFormatUpdateNeeded(self.root) - - if not progtrack: - progtrack = progress.NullProgressTracker() - - be_name, be_uuid = bootenv.BootEnv.get_be_name(self.root) - self.history.log_operation_start("refresh-publishers", - be_name=be_name, be_uuid=be_uuid) - - pubs_to_refresh = [] - - if not pubs: - # Omit disabled publishers. 
- pubs = [p for p in self.gen_publishers()] - - if not pubs: - self.__rebuild_image_catalogs(progtrack=progtrack) - return - - for pub in pubs: - p = pub - if not isinstance(p, publisher.Publisher): - p = self.get_publisher(prefix=p) - if p.disabled: - e = apx.DisabledPublisher(p) - self.history.log_operation_end(error=e) - raise e - pubs_to_refresh.append(p) - - if not pubs_to_refresh: - self.history.log_operation_end( - result=history.RESULT_NOTHING_TO_DO) - return - - # Verify validity of certificates before attempting network - # operations. - try: - self.check_cert_validity(pubs=pubs_to_refresh) - except apx.ExpiringCertificate as e: - logger.error(str(e)) - - try: - # Ensure Image directory structure is valid. - self.mkdirs() - except Exception as e: - self.history.log_operation_end(error=e) - raise - - progtrack.refresh_start(len(pubs_to_refresh), - full_refresh=full_refresh) - - failed = [] - total = 0 - succeeded = set() - updated = self.__start_state_update() - for pub in pubs_to_refresh: - total += 1 - progtrack.refresh_start_pub(pub) - try: - changed, e = pub.refresh( - full_refresh=full_refresh, - immediate=immediate, progtrack=progtrack) - if changed: - updated = True - - if not ignore_unreachable and e: - failed.append((pub, e)) - continue - - except apx.PermissionsException as e: - failed.append((pub, e)) - # No point in continuing since no data can - # be written. - break - except apx.ApiException as e: - failed.append((pub, e)) - continue - finally: - progtrack.refresh_end_pub(pub) - succeeded.add(pub.prefix) - - progtrack.refresh_done() - - if updated: - self.__rebuild_image_catalogs(progtrack=progtrack) - # Ensure any configuration or metadata changes made - # during refresh are reflected in on-disk state. - self.save_config() - else: - self.__end_state_update() - - if failed: - e = apx.CatalogRefreshException(failed, total, - len(succeeded)) - self.history.log_operation_end(error=e) - raise e - - if not updated: - self.history.log_operation_end( - result=history.RESULT_NOTHING_TO_DO) - return - self.history.log_operation_end() - - def _get_publisher_meta_dir(self): - if self.version >= 3: - return IMG_PUB_DIR - return "catalog" - - def _get_publisher_cache_root(self, prefix): - return os.path.join(self.imgdir, "cache", "publisher", prefix) - - def _get_publisher_meta_root(self, prefix): - return os.path.join(self.imgdir, self._get_publisher_meta_dir(), - prefix) - - def remove_publisher_metadata(self, pub, progtrack=None, rebuild=True): - """Removes the metadata for the specified publisher object, - except data for installed packages. - - 'pub' is the object of the publisher to remove the data for. - - 'progtrack' is an optional ProgressTracker object. - - 'rebuild' is an optional boolean specifying whether image - catalogs should be rebuilt after removing the publisher's - metadata. - """ - - if self.version < 4: - # Older images don't require fine-grained deletion. - pub.remove_meta_root() - if rebuild: - self.__rebuild_image_catalogs( - progtrack=progtrack) - return - - # Build a list of paths that shouldn't be removed because they - # belong to installed packages. 
- excluded = [ - self.get_manifest_path(f) - for f in self.gen_installed_pkgs() - if f.publisher == pub.prefix + def __make_publisherless_fmri(pat): + p = pkg.fmri.MatchingPkgFmri(pat) + p.publisher = None + return p + + def __calc_frozen(): + stems_and_pats = imageplan.ImagePlan.freeze_pkgs_match( + self, pat_list + ) + return dict( + [ + (s, __make_publisherless_fmri(p)) + for s, p in six.iteritems(stems_and_pats) ] - - if not excluded: - pub.remove_meta_root() - else: - try: - # Discard all publisher metadata except - # package manifests as a first pass. - for entry in os.listdir(pub.meta_root): - if entry == "pkg": - continue - - target = os.path.join(pub.meta_root, - entry) - if os.path.isdir(target): - shutil.rmtree(target, - ignore_errors=True) - else: - portable.remove(target) - - # Build the list of directories that can't be - # removed. - exdirs = [os.path.dirname(e) for e in excluded] - - # Now try to discard only package manifests - # that aren't for installed packages. - mroot = os.path.join(pub.meta_root, "pkg") - for pdir in os.listdir(mroot): - proot = os.path.join(mroot, pdir) - if proot not in exdirs: - # This removes all manifest data - # for a given package stem. - shutil.rmtree(proot, - ignore_errors=True) - continue - - # Remove only manifest data for packages - # that are not installed. - for mname in os.listdir(proot): - mpath = os.path.join(proot, - mname) - if mpath not in excluded: - portable.remove(mpath) - - # Finally, dump any cache data for this - # publisher if possible. - shutil.rmtree(self._get_publisher_cache_root( - pub.prefix), ignore_errors=True) - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise apx._convert_error(e) - - if rebuild: - self.__rebuild_image_catalogs(progtrack=progtrack) - - def gen_installed_pkg_names(self, anarchy=True): - """A generator function that produces FMRI strings as it - iterates over the list of installed packages. This is - faster than gen_installed_pkgs when only the FMRI string - is needed.""" - - cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - for f in cat.fmris(objects=False): - if anarchy: - # Catalog entries always have publisher prefix. - yield "pkg:/{0}".format(f[6:].split("/", 1)[-1]) - continue - yield f - - def gen_installed_pkgs(self, pubs=EmptyI, ordered=False): - """Return an iteration through the installed packages.""" - - cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - for f in cat.fmris(pubs=pubs, ordered=ordered): - yield f - - def count_installed_pkgs(self, pubs=EmptyI): - """Return the number of installed packages.""" - cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - assert cat.package_count == cat.package_version_count - return sum( - pkg_count - for (pub, pkg_count, _ignored) in - cat.get_package_counts_by_pub(pubs=pubs) + ) + + if dry_run: + return list(__calc_frozen().values()) + with self.locked_op("freeze"): + stems_and_pats = __calc_frozen() + # Get existing dictionary of frozen packages. + d = self.__freeze_dict_load() + # Update the dictionary with the new freezes and + # comment. + timestamp = calendar.timegm(time.gmtime()) + d.update( + [ + (s, (str(p), comment, timestamp)) + for s, p in six.iteritems(stems_and_pats) + ] + ) + self._freeze_dict_save(d) + return list(stems_and_pats.values()) + + def unfreeze_pkgs(self, pat_list, progtrack, check_cancel, dry_run): + """Unfreeze the specified packages... use pattern matching on + names; ignore versions. + + The 'pat_list' parameter contains the list of patterns of + packages to freeze. 
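Each entry that freeze_pkgs() records in the frozen-package dictionary pairs a stem with its version pattern, the comment and a timestamp; the stem and FMRI below are invented:

import calendar
import time

timestamp = calendar.timegm(time.gmtime())
frozen = {}
frozen["web/server/nginx"] = ("pkg:/web/server/nginx@1.24", "hold for testing", timestamp)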
+ + The 'progtrack' parameter contains the progress tracker for this + operation. + + The 'check_cancel' parameter contains a function to call to + check if the operation has been canceled. + + The 'dry_run' parameter controls whether packages are actually + frozen.""" + + def __calc_unfrozen(): + # Get existing dictionary of frozen packages. + d = self.__freeze_dict_load() + # Match the user's patterns against the frozen packages + # and return the stems which matched, and the dictionary + # of the currently frozen packages. + ip = imageplan.ImagePlan + return ( + set( + ip.match_user_stems( + self, + pat_list, + ip.MATCH_ALL, + raise_unmatched=False, + universe=[(None, k) for k in d.keys()], + ) + ), + d, + ) + + if dry_run: + return __calc_unfrozen()[0] + with self.locked_op("freeze"): + unfrozen_set, d = __calc_unfrozen() + # Remove the specified packages from the frozen set. + for n in unfrozen_set: + d.pop(n, None) + self._freeze_dict_save(d) + return unfrozen_set + + def __call_imageplan_evaluate(self, ip): + # A plan can be requested without actually performing an + # operation on the image. + if self.history.operation_name: + self.history.operation_start_state = ip.get_plan() + + try: + ip.evaluate() + except apx.ConflictingActionErrors: + # Image plan evaluation can fail because of duplicate + # action discovery, but we still want to be able to + # display and log the solved FMRI changes. + self.imageplan = ip + if self.history.operation_name: + self.history.operation_end_state = ( + "Unevaluated: merged plan had errors\n" + + ip.get_plan(full=False) ) + raise + + self.imageplan = ip + + if self.history.operation_name: + self.history.operation_end_state = ip.get_plan(full=False) + + def __make_plan_common( + self, + _op, + _progtrack, + _check_cancel, + _noexecute, + _ip_noop=False, + **kwargs, + ): + """Private helper function to perform base plan creation and + cleanup. + """ - def gen_tracked_stems(self): - """Return an iteration through all the tracked pkg stems - in the set of currently installed packages. Return value - is group pkg fmri, stem""" - cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - excludes = self.list_excludes() - - for f in cat.fmris(): - for a in cat.get_entry_actions(f, - [pkg.catalog.Catalog.DEPENDENCY], excludes=excludes): - if a.name == "depend" and a.attrs["type"] == "group": - yield (f, self.strtofmri( - a.attrs["fmri"]).pkg_name) - - def _create_fast_lookups(self, progtrack=None): - """Create an on-disk database mapping action name and key - attribute value to the action string comprising the unique - attributes of the action, for all installed actions. This is - done with a file mapping the tuple to an offset into a second - file, where those actions are kept. Once the offsets are loaded - into memory, it is simple to seek into the second file to the - given offset and read until you hit an action that doesn't - match.""" - - if not progtrack: - progtrack = progress.NullProgressTracker() - - self.__actdict = None - self.__actdict_timestamp = None - stripped_path = os.path.join(self.__action_cache_dir, - "actions.stripped") - offsets_path = os.path.join(self.__action_cache_dir, - "actions.offsets") - conflicting_keys_path = os.path.join(self.__action_cache_dir, - "keys.conflicting") - - excludes = self.list_excludes() - heap = [] - - # nsd is the "name-space dictionary." 
It maps action name - # spaces (see action.generic for more information) to - # dictionaries which map keys to pairs which contain an action - # with that key and the pfmri of the package which delivered the - # action. - nsd = {} - - from heapq import heappush, heappop - - progtrack.job_start(progtrack.JOB_FAST_LOOKUP) - - for pfmri in self.gen_installed_pkgs(): - progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) - m = self.get_manifest(pfmri, ignore_excludes=True) - for act in m.gen_actions(excludes=excludes): - if not act.globally_identical: - continue - act.strip() - heappush(heap, (act.name, - act.attrs[act.key_attr], pfmri, act)) - nsd.setdefault(act.namespace_group, {}) - nsd[act.namespace_group].setdefault( - act.attrs[act.key_attr], []) - nsd[act.namespace_group][ - act.attrs[act.key_attr]].append(( - act, pfmri)) - - progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) - - # If we can't write the temporary files, then there's no point - # in producing actdict because it depends on a synchronized - # stripped actions file. - try: - actdict = {} - sf, sp = self.temporary_file(close=False) - of, op = self.temporary_file(close=False) - bf, bp = self.temporary_file(close=False) - - sf = os.fdopen(sf, "w") - of = os.fdopen(of, "w") - bf = os.fdopen(bf, "w") - - # We need to make sure the files are coordinated. - timestamp = int(time.time()) - sf.write("VERSION 1\n{0}\n".format(timestamp)) - of.write("VERSION 2\n{0}\n".format(timestamp)) - # The conflicting keys file doesn't need a timestamp - # because it's not coordinated with the stripped or - # offsets files and the result of loading it isn't - # reused by this class. - bf.write("VERSION 1\n") - - cnt, offset_update_bytes = 0, 0 - last_name, last_key, last_offset = None, None, sf.tell() - while heap: - # This is a tight loop, so try to avoid burning - # CPU calling into the progress tracker - # excessively. - if len(heap) % 100 == 0: - progtrack.job_add_progress( - progtrack.JOB_FAST_LOOKUP) - item = heappop(heap) - fmri, act = item[2:] - key = act.attrs[act.key_attr] - if act.name != last_name or key != last_key: - if last_name is None: - assert last_key is None - cnt += 1 - last_name = act.name - last_key = key - else: - assert cnt > 0 - of.write("{0} {1} {2} {3}\n".format( - last_name, last_offset, - cnt, last_key)) - actdict[(last_name, last_key)] = last_offset, cnt - last_name, last_key = act.name, key - last_offset += offset_update_bytes - offset_update_bytes = 0 - cnt = 1 - else: - cnt += 1 - sf_line = f"{fmri} {act}\n" - sf.write(sf_line) - offset_update_bytes += len(sf_line.encode('utf-8')) - if last_name is not None: - assert last_key is not None - assert last_offset is not None - assert cnt > 0 - of.write("{0} {1} {2} {3}\n".format( - last_name, last_offset, cnt, last_key)) - actdict[(last_name, last_key)] = \ - last_offset, cnt - - progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) - - bad_keys = imageplan.ImagePlan._check_actions(nsd) - for k in sorted(bad_keys): - bf.write("{0}\n".format(k)) - - progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) - sf.close() - of.close() - bf.close() - os.chmod(sp, misc.PKG_FILE_MODE) - os.chmod(op, misc.PKG_FILE_MODE) - os.chmod(bp, misc.PKG_FILE_MODE) - except BaseException as e: - try: - os.unlink(sp) - os.unlink(op) - os.unlink(bp) - except: - pass - raise - - progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) - - # Finally, rename the temporary files into their final place. 
- # If we have any problems, do our best to remove them, and we'll - # try to recreate them on the read-side. - try: - if not os.path.exists(self.__action_cache_dir): - os.makedirs(self.__action_cache_dir) - portable.rename(sp, stripped_path) - portable.rename(op, offsets_path) - portable.rename(bp, conflicting_keys_path) - except EnvironmentError as e: - if e.errno == errno.EACCES or e.errno == errno.EROFS: - self.__action_cache_dir = self.temporary_dir() - stripped_path = os.path.join( - self.__action_cache_dir, "actions.stripped") - offsets_path = os.path.join( - self.__action_cache_dir, "actions.offsets") - conflicting_keys_path = os.path.join( - self.__action_cache_dir, "keys.conflicting") - portable.rename(sp, stripped_path) - portable.rename(op, offsets_path) - portable.rename(bp, conflicting_keys_path) - else: - exc_info = sys.exc_info() - try: - os.unlink(stripped_path) - os.unlink(offsets_path) - os.unlink(conflicting_keys_path) - except: - pass - six.reraise(exc_info[0], exc_info[1], exc_info[2]) - - progtrack.job_add_progress(progtrack.JOB_FAST_LOOKUP) - progtrack.job_done(progtrack.JOB_FAST_LOOKUP) - return actdict, timestamp - - def _remove_fast_lookups(self): - """Remove on-disk database created by _create_fast_lookups. - Should be called before updating image state to prevent the - client from seeing stale state if _create_fast_lookups is - interrupted.""" - - for fname in ("actions.stripped", "actions.offsets", - "keys.conflicting"): - try: - portable.remove(os.path.join( - self.__action_cache_dir, fname)) - except EnvironmentError as e: - if e.errno == errno.ENOENT: - continue - raise apx._convert_error(e) - - def _load_actdict(self, progtrack): - """Read the file of offsets created in _create_fast_lookups() - and return the dictionary mapping action name and key value to - offset.""" - - try: - of = open(os.path.join(self.__action_cache_dir, - "actions.offsets"), "r") - except IOError as e: - if e.errno != errno.ENOENT: - raise - actdict, otimestamp = self._create_fast_lookups() - assert actdict is not None - self.__actdict = actdict - self.__actdict_timestamp = otimestamp - return actdict - - # Make sure the files are paired, and try to create them if not. - oversion = of.readline().rstrip() - otimestamp = of.readline().rstrip() - - # The original action.offsets file existed and had the same - # timestamp as the stored actdict, so that actdict can be - # reused. - if self.__actdict and otimestamp == self.__actdict_timestamp: - return self.__actdict - - sversion, stimestamp = self._get_stripped_actions_file( - internal=True) - - # If we recognize neither file's version or their timestamps - # don't match, then we blow them away and try again. - if oversion != "VERSION 2" or sversion != "VERSION 1" or \ - stimestamp != otimestamp: - of.close() - actdict, otimestamp = self._create_fast_lookups() - assert actdict is not None - self.__actdict = actdict - self.__actdict_timestamp = otimestamp - return actdict - - # At this point, the original actions.offsets file existed, no - # actdict was saved in the image, the versions matched what was - # expected, and the timestamps of the actions.offsets and - # actions.stripped files matched, so the actions.offsets file is - # parsed to generate actdict. - actdict = {} - - for line in of: - actname, offset, cnt, key_attr = \ - line.rstrip().split(None, 3) - off = int(offset) - actdict[(actname, key_attr)] = (off, int(cnt)) - - # This is a tight loop, so try to avoid burning - # CPU calling into the progress tracker excessively. 
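For reference, the actions.offsets lines parsed here follow the form "<action name> <offset> <count> <key attribute>"; the example line is invented:

line = "file 1042 3 usr/bin/ls\n"
actname, offset, cnt, key_attr = line.rstrip().split(None, 3)
actdict = {(actname, key_attr): (int(offset), int(cnt))}
# (offset, count) locate the matching entries in actions.stripped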
- # Since we are already using the offset, we use that - # to damp calls back into the progress tracker. - if off % 500 == 0: - progtrack.plan_add_progress( - progtrack.PLAN_ACTION_CONFLICT) - - of.close() - self.__actdict = actdict - self.__actdict_timestamp = otimestamp - return actdict - - def _get_stripped_actions_file(self, internal=False): - """Open the actions file described in _create_fast_lookups() and - return the corresponding file object.""" - - sf = open(os.path.join(self.__action_cache_dir, - "actions.stripped"), "r") - sversion = sf.readline().rstrip() - stimestamp = sf.readline().rstrip() - if internal: - sf.close() - return sversion, stimestamp - - return sf - - def _load_conflicting_keys(self): - """Load the list of keys which have conflicting actions in the - existing image. If no such list exists, then return None.""" - - pth = os.path.join(self.__action_cache_dir, "keys.conflicting") - try: - with open(pth, "r") as fh: - version = fh.readline().rstrip() - if version != "VERSION 1": - return None - return set(l.rstrip() for l in fh) - except EnvironmentError as e: - if e.errno == errno.ENOENT: - return None - raise - - def gen_installed_actions_bytype(self, atype, implicit_dirs=False): - """Iterates through the installed actions of type 'atype'. If - 'implicit_dirs' is True and 'atype' is 'dir', then include - directories only implicitly defined by other filesystem - actions.""" - - if implicit_dirs and atype != "dir": - implicit_dirs = False - - excludes = self.list_excludes() - - for pfmri in self.gen_installed_pkgs(): - m = self.get_manifest(pfmri) - dirs = set() - for act in m.gen_actions_by_type(atype, - excludes=excludes): - if implicit_dirs: - dirs.add(act.attrs["path"]) - yield act, pfmri - if implicit_dirs: - da = pkg.actions.directory.DirectoryAction - for d in m.get_directories(excludes): - if d not in dirs: - yield da(path=d, implicit="true"), pfmri - - def get_installed_pubs(self): - """Returns a set containing the prefixes of all publishers with - installed packages.""" - - cat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - return cat.publishers() - - def strtofmri(self, myfmri): - return pkg.fmri.PkgFmri(myfmri) - - def strtomatchingfmri(self, myfmri): - return pkg.fmri.MatchingPkgFmri(myfmri) - - def get_user_by_name(self, name): - uid = self._usersbyname.get(name, None) - if uid is not None: - return uid - return portable.get_user_by_name(name, self.root, - self.type != IMG_USER) - - def get_name_by_uid(self, uid, returnuid = False): - # XXX What to do about IMG_PARTIAL? - try: - return portable.get_name_by_uid(uid, self.root, - self.type != IMG_USER) - except KeyError: - if returnuid: - return uid - else: - raise - - def get_group_by_name(self, name): - gid = self._groupsbyname.get(name, None) - if gid is not None: - return gid - return portable.get_group_by_name(name, self.root, - self.type != IMG_USER) - - def get_name_by_gid(self, gid, returngid = False): - try: - return portable.get_name_by_gid(gid, self.root, - self.type != IMG_USER) - except KeyError: - if returngid: - return gid - else: - raise - - def get_usernames_by_gid(self, gid): - return portable.get_usernames_by_gid(gid, self.root) - - def update_index_dir(self, postfix="index"): - """Since the index directory will not reliably be updated when - the image root is, this should be called prior to using the - index directory. 
- """ - if self.version == self.CURRENT_VERSION: - self.index_dir = os.path.join(self.imgdir, "cache", - postfix) + if DebugValues.get_value("simulate-plan-hang"): + # If pkg5.hang file is present in image dir, then + # sleep after loading configuration until file is + # gone. This is used by the test suite for signal + # handling testing, etc. + hang_file = os.path.join(self.imgdir, "pkg5.hang") + with open(hang_file, "w") as f: + f.write(str(os.getpid())) + + while os.path.exists(hang_file): + time.sleep(1) + + # Allow garbage collection of previous plan. + self.imageplan = None + + ip = imageplan.ImagePlan( + self, _op, _progtrack, _check_cancel, noexecute=_noexecute + ) + + # Always start with most current (on-disk) state information. + self.__init_catalogs() + + try: + try: + if _ip_noop: + ip.plan_noop(**kwargs) + elif _op in [ + pkgdefs.API_OP_ATTACH, + pkgdefs.API_OP_DETACH, + pkgdefs.API_OP_SYNC, + ]: + ip.plan_sync(**kwargs) + elif _op in [ + pkgdefs.API_OP_CHANGE_FACET, + pkgdefs.API_OP_CHANGE_VARIANT, + ]: + ip.plan_change_varcets(**kwargs) + elif _op == pkgdefs.API_OP_DEHYDRATE: + ip.plan_dehydrate(**kwargs) + elif _op == pkgdefs.API_OP_INSTALL: + ip.plan_install(**kwargs) + elif _op == pkgdefs.API_OP_EXACT_INSTALL: + ip.plan_exact_install(**kwargs) + elif _op in [pkgdefs.API_OP_FIX, pkgdefs.API_OP_VERIFY]: + ip.plan_fix(**kwargs) + elif _op == pkgdefs.API_OP_REHYDRATE: + ip.plan_rehydrate(**kwargs) + elif _op == pkgdefs.API_OP_REVERT: + ip.plan_revert(**kwargs) + elif _op == pkgdefs.API_OP_SET_MEDIATOR: + ip.plan_set_mediators(**kwargs) + elif _op == pkgdefs.API_OP_UNINSTALL: + ip.plan_uninstall(**kwargs) + elif _op == pkgdefs.API_OP_UPDATE: + ip.plan_update(**kwargs) else: - self.index_dir = os.path.join(self.imgdir, postfix) - - def cleanup_downloads(self): - """Clean up any downloads that were in progress but that - did not successfully finish.""" - - shutil.rmtree(self._incoming_cache_dir, True) - - def cleanup_cached_content(self, progtrack=None, force=False, - verbose=False): - """Delete the directory that stores all of our cached - downloaded content. This may take a while for a large - directory hierarchy. Don't clean up caches if the - user overrode the underlying setting using PKG_CACHEDIR or - PKG_CACHEROOT. """ - - if (not force and - not self.cfg.get_policy(imageconfig.FLUSH_CONTENT_CACHE)): - return - - cdirs = [] - for path, readonly, pub, layout in self.get_cachedirs(): - if verbose: - print('Checking cache directory {} ({})' - .format(path, pub)) - if readonly or (self.__user_cache_dir and - path.startswith(self.__user_cache_dir)): - continue - cdirs.append(path) - - if not cdirs: - return - - if not progtrack: - progtrack = progress.NullProgressTracker() - - # 'Updating package cache' - progtrack.job_start(progtrack.JOB_PKG_CACHE, goal=len(cdirs)) - for path in cdirs: - shutil.rmtree(path, True) - progtrack.job_add_progress(progtrack.JOB_PKG_CACHE) - progtrack.job_done(progtrack.JOB_PKG_CACHE) - - def salvage(self, path, full_path=False): - """Called when unexpected file or directory is found during - package operations; returns the path of the salvage - directory where the item was stored. Can be called with - either image-relative or absolute (current) path to file/dir - to be salvaged. 
If full_path is False (the default), remove - the current mountpoint of the image from the returned - directory path""" - - # This ensures that if the path is already rooted in the image, - # that it will be stored in lost+found (due to os.path.join - # behaviour with absolute path components). - if path.startswith(self.root): - path = path.replace(self.root, "", 1) - - if os.path.isabs(path): - # If for some reason the path wasn't rooted in the - # image, but it is an absolute one, then strip the - # absolute part so that it will be stored in lost+found - # (due to os.path.join behaviour with absolute path - # components). - path = os.path.splitdrive(path)[-1].lstrip(os.path.sep) - - sdir = os.path.normpath( - os.path.join(self.imgdir, "lost+found", - path + "-" + time.strftime("%Y%m%dT%H%M%SZ"))) - - parent = os.path.dirname(sdir) - if not os.path.exists(parent): - misc.makedirs(parent) - - orig = os.path.normpath(os.path.join(self.root, path)) - - misc.move(orig, sdir) - # remove current mountpoint from sdir - if not full_path: - sdir.replace(self.root, "", 1) - return sdir - - def recover(self, local_spath, full_dest_path, dest_path, old_path): - """Called when recovering directory contents to implement - "salvage-from" directive... full_dest_path must exist. - dest_path is the image-relative location where we salvage to, - old_path is original image-relative directory that delivered - the files we're now recovering. - - When recovering directories where the salvage-from string is - a substring of the previously packaged directory, attempt - to restore as much of the old directory structure as possible - by comparing the salvage-from value with the previously - packaged directory. - - For example, say we had user-content in /var/user/myuser/.ssh, - but have stopped delivering that dir, replacing it with a new - directory /var/.migrate/user which specifies - salvage-from=var/user. - - The intent of the package author, was to have the - /var/.migrate/user directory get the unpackaged 'myuser/.ssh' - directory created as part of the salvaging operation, giving - them /var/.migrate/user/myuser/.ssh - and not to just end up with - /var/.migrate/user/ - """ - - source_path = os.path.normpath( - os.path.join(self.root, local_spath)) - if dest_path != old_path and old_path.startswith( - dest_path + os.path.sep): - # this is here so that when salvaging the contents - # of a previously packaged directory, we attempt to - # restore as much of the old directory structure as - # possible. - spath = os.path.relpath(old_path, dest_path) - full_dest_path = os.path.join(full_dest_path, spath) - try: - os.makedirs(full_dest_path) - except OSError as e: - if e.errno != errno.EEXIST: - raise e - - for file_name in os.listdir(source_path): - misc.move(os.path.join(source_path, file_name), - full_dest_path) - - def temporary_dir(self): - """Create a temp directory under the image directory for various - purposes. If the process is unable to create a directory in the - image's temporary directory, a replacement location is found.""" - - try: - misc.makedirs(self.__tmpdir) - except (apx.PermissionsException, - apx.ReadOnlyFileSystemException): - self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-") - atexit.register(shutil.rmtree, - self.__tmpdir, ignore_errors=True) - return self.temporary_dir() - - try: - rval = tempfile.mkdtemp(dir=self.__tmpdir) - - # Force standard mode. 
- os.chmod(rval, misc.PKG_DIR_MODE) - return rval - except EnvironmentError as e: - if e.errno == errno.EACCES or e.errno == errno.EROFS: - self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-") - atexit.register(shutil.rmtree, - self.__tmpdir, ignore_errors=True) - return self.temporary_dir() - raise apx._convert_error(e) - - def temporary_file(self, close=True): - """Create a temporary file under the image directory for various - purposes. If 'close' is True, close the file descriptor; - otherwise leave it open. If the process is unable to create a - file in the image's temporary directory, a replacement is - found.""" - - try: - misc.makedirs(self.__tmpdir) - except (apx.PermissionsException, - apx.ReadOnlyFileSystemException): - self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-") - atexit.register(shutil.rmtree, - self.__tmpdir, ignore_errors=True) - return self.temporary_file(close=close) - - try: - fd, name = tempfile.mkstemp(dir=self.__tmpdir) - if close: - os.close(fd) - except EnvironmentError as e: - if e.errno == errno.EACCES or e.errno == errno.EROFS: - self.__tmpdir = tempfile.mkdtemp(prefix="pkg5tmp-") - atexit.register(shutil.rmtree, - self.__tmpdir, ignore_errors=True) - return self.temporary_file(close=close) - raise apx._convert_error(e) + raise RuntimeError("Unknown api op: {0}".format(_op)) + + except apx.ActionExecutionError as e: + raise + except pkg.actions.ActionError as e: + raise apx.InvalidPackageErrors([e]) + except apx.ApiException: + raise + try: + self.__call_imageplan_evaluate(ip) + except apx.ActionExecutionError as e: + raise + except pkg.actions.ActionError as e: + raise apx.InvalidPackageErrors([e]) + finally: + self.__cleanup_alt_pkg_certs() + + def make_install_plan( + self, + op, + progtrack, + check_cancel, + noexecute, + pkgs_inst=None, + reject_list=misc.EmptyI, + ): + """Take a list of packages, specified in pkgs_inst, and attempt + to assemble an appropriate image plan. This is a helper + routine for some common operations in the client. + """ - if close: - return name + progtrack.plan_all_start() + + self.__make_plan_common( + op, + progtrack, + check_cancel, + noexecute, + pkgs_inst=pkgs_inst, + reject_list=reject_list, + ) + + progtrack.plan_all_done() + + def make_change_varcets_plan( + self, + op, + progtrack, + check_cancel, + noexecute, + facets=None, + reject_list=misc.EmptyI, + variants=None, + ): + """Take a list of variants and/or facets and attempt to + assemble an image plan which changes them. This is a helper + routine for some common operations in the client.""" + + progtrack.plan_all_start() + # compute dict of changing variants + if variants: + new = set(six.iteritems(variants)) + cur = set(six.iteritems(self.cfg.variants)) + variants = dict(new - cur) + elif facets: + new_facets = self.get_facets() + for f in facets: + if facets[f] is None: + new_facets.pop(f, None) else: - return fd, name - - def __filter_install_matches(self, matches): - """Attempts to eliminate redundant matches found during - packaging operations: - - * First, stems of installed packages for publishers that - are now unknown (no longer present in the image - configuration) are dropped. - - * Second, if multiple matches are still present, stems of - of installed packages, that are not presently in the - corresponding publisher's catalog, are dropped. - - * Finally, if multiple matches are still present, all - stems except for those in state PKG_STATE_INSTALLED are - dropped. 
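make_change_varcets_plan above reduces the requested variants to only those that actually differ from the image's current settings by set-subtracting the (name, value) pairs (via six.iteritems in the source). A small self-contained illustration of that idiom, with invented values:

# Invented values, for illustration of the set-difference idiom only.
requested = {"variant.arch": "i386", "variant.debug.osnet": "true"}
current = {"variant.arch": "i386", "variant.debug.osnet": "false"}

# Pairs whose value already matches the image drop out of the difference.
changing = dict(set(requested.items()) - set(current.items()))
print(changing)   # {'variant.debug.osnet': 'true'}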
- - Returns a list of the filtered matches, along with a dict of - their unique names.""" - - olist = [] - onames = set() - - # First eliminate any duplicate matches that are for unknown - # publishers (publishers which have been removed from the image - # configuration). - publist = set(p.prefix for p in self.get_publishers().values()) - for m, st in matches: - if m.publisher in publist: - onames.add(m.get_pkg_stem()) - olist.append((m, st)) - - # Next, if there are still multiple matches, eliminate matches - # belonging to publishers that no longer have the FMRI in their - # catalog. - found_state = False - if len(onames) > 1: - mlist = [] - mnames = set() - for m, st in olist: - if not st["in_catalog"]: - continue - if st["state"] == pkgdefs.PKG_STATE_INSTALLED: - found_state = True - mnames.add(m.get_pkg_stem()) - mlist.append((m, st)) - olist = mlist - onames = mnames - - # Finally, if there are still multiple matches, and a known - # stem is installed, then eliminate any stems that do not - # have an installed version. - if found_state and len(onames) > 1: - mlist = [] - mnames = set() - for m, st in olist: - if st["state"] == pkgdefs.PKG_STATE_INSTALLED: - mnames.add(m.get_pkg_stem()) - mlist.append((m, st)) - olist = mlist - onames = mnames - - return olist, onames - - def flag_pkgs(self, pfmris, state, value, progtrack): - """Sets/unsets a state flag for packages installed in - the image.""" - - if self.version < self.CURRENT_VERSION: - raise apx.ImageFormatUpdateNeeded(self.root) - - # Only the 'manual' flag can be set or unset via this - # method. - if state not in [pkgdefs.PKG_STATE_MANUAL]: - raise apx.ImagePkgStateError(pfmri, state) - - if not progtrack: - progtrack = progress.NullProgressTracker() - - with self.locked_op("state"): - - icat = self.get_catalog(self.IMG_CATALOG_INSTALLED) - kcat = self.get_catalog(self.IMG_CATALOG_KNOWN) - - progtrack.job_start(progtrack.JOB_STATE_DB) - - changed = set() - - for pfmri in pfmris: - - entry = kcat.get_entry(pfmri) - mdata = entry.get("metadata", {}) - states = set(mdata.get("states", set())) - - if pkgdefs.PKG_STATE_INSTALLED not in states: - raise apx.ImagePkgStateError(pfmri, - states) - - progtrack.job_add_progress( - progtrack.JOB_STATE_DB) - - if value and state not in states: - states.add(state) - changed.add(pfmri) - elif not value and state in states: - states.discard(state) - changed.add(pfmri) - else: - continue - - # Catalog format only supports lists. - mdata["states"] = list(states) - - # Now record the package state. - kcat.update_entry(mdata, pfmri=pfmri) - icat.update_entry(mdata, pfmri=pfmri) - - progtrack.job_done(progtrack.JOB_STATE_DB) - - if len(changed): - progtrack.job_start(progtrack.JOB_IMAGE_STATE) - self.__catalog_save([ - (kcat, self.IMG_CATALOG_KNOWN), - (icat, self.IMG_CATALOG_INSTALLED) - ], changed, progtrack) - progtrack.job_done(progtrack.JOB_IMAGE_STATE) - - print('{} package{} updated.'.format( - len(changed), "s" if len(changed) != 1 else "")) - - def avoid_pkgs(self, pat_list, progtrack, check_cancel): - """Avoid the specified packages... use pattern matching on - names; ignore versions.""" - - with self.locked_op("avoid"): - ip = imageplan.ImagePlan - self._avoid_set_save(self.avoid_set_get() | - set(ip.match_user_stems(self, pat_list, - ip.MATCH_UNINSTALLED))) - - def unavoid_pkgs(self, pat_list, progtrack, check_cancel): - """Unavoid the specified packages... 
use pattern matching on - names; ignore versions.""" - - with self.locked_op("unavoid"): - ip = imageplan.ImagePlan - unavoid_set = set(ip.match_user_stems(self, pat_list, - ip.MATCH_ALL)) - current_set = self.avoid_set_get() - not_avoided = unavoid_set - current_set - if not_avoided: - raise apx.PlanCreationException(not_avoided=not_avoided) - - # Don't allow unavoid if removal of the package from the - # avoid list would require the package to be installed - # as this would invalidate current image state. If the - # package is already installed though, it doesn't really - # matter if it's a target of an avoid or not. - installed_set = set([ - f.pkg_name - for f in self.gen_installed_pkgs() - ]) - - would_install = [ - a - for f, a in self.gen_tracked_stems() - if a in unavoid_set and a not in installed_set - ] - - if would_install: - raise apx.PlanCreationException(would_install=would_install) - - self._avoid_set_save(current_set - unavoid_set) - - def get_avoid_dict(self): - """ return dict of lists (avoided stem, pkgs w/ group - dependencies on this pkg)""" - ret = dict((a, list()) for a in self.avoid_set_get()) - for fmri, group in self.gen_tracked_stems(): - if group in ret: - ret[group].append(fmri.pkg_name) - return ret - - def freeze_pkgs(self, pat_list, progtrack, check_cancel, dry_run, - comment): - """Freeze the specified packages... use pattern matching on - names. - - The 'pat_list' parameter contains the list of patterns of - packages to freeze. - - The 'progtrack' parameter contains the progress tracker for this - operation. - - The 'check_cancel' parameter contains a function to call to - check if the operation has been canceled. - - The 'dry_run' parameter controls whether packages are actually - frozen. - - The 'comment' parameter contains the comment, if any, which will - be associated with the packages that are frozen. - """ - - def __make_publisherless_fmri(pat): - p = pkg.fmri.MatchingPkgFmri(pat) - p.publisher = None - return p - - def __calc_frozen(): - stems_and_pats = imageplan.ImagePlan.freeze_pkgs_match( - self, pat_list) - return dict([(s, __make_publisherless_fmri(p)) - for s, p in six.iteritems(stems_and_pats)]) - if dry_run: - return list(__calc_frozen().values()) - with self.locked_op("freeze"): - stems_and_pats = __calc_frozen() - # Get existing dictionary of frozen packages. - d = self.__freeze_dict_load() - # Update the dictionary with the new freezes and - # comment. - timestamp = calendar.timegm(time.gmtime()) - d.update([(s, (str(p), comment, timestamp)) - for s, p in six.iteritems(stems_and_pats)]) - self._freeze_dict_save(d) - return list(stems_and_pats.values()) - - def unfreeze_pkgs(self, pat_list, progtrack, check_cancel, dry_run): - """Unfreeze the specified packages... use pattern matching on - names; ignore versions. - - The 'pat_list' parameter contains the list of patterns of - packages to freeze. - - The 'progtrack' parameter contains the progress tracker for this - operation. - - The 'check_cancel' parameter contains a function to call to - check if the operation has been canceled. - - The 'dry_run' parameter controls whether packages are actually - frozen.""" - - def __calc_unfrozen(): - # Get existing dictionary of frozen packages. - d = self.__freeze_dict_load() - # Match the user's patterns against the frozen packages - # and return the stems which matched, and the dictionary - # of the currently frozen packages. 
- ip = imageplan.ImagePlan - return set(ip.match_user_stems(self, pat_list, - ip.MATCH_ALL, raise_unmatched=False, - universe=[(None, k) for k in d.keys()])), d - - if dry_run: - return __calc_unfrozen()[0] - with self.locked_op("freeze"): - unfrozen_set, d = __calc_unfrozen() - # Remove the specified packages from the frozen set. - for n in unfrozen_set: - d.pop(n, None) - self._freeze_dict_save(d) - return unfrozen_set - - def __call_imageplan_evaluate(self, ip): - # A plan can be requested without actually performing an - # operation on the image. - if self.history.operation_name: - self.history.operation_start_state = ip.get_plan() + new_facets[f] = facets[f] + facets = new_facets + + self.__make_plan_common( + op, + progtrack, + check_cancel, + noexecute, + new_variants=variants, + new_facets=facets, + reject_list=reject_list, + ) + + progtrack.plan_all_done() + + def make_set_mediators_plan( + self, op, progtrack, check_cancel, noexecute, mediators + ): + """Take a dictionary of mediators and attempt to assemble an + appropriate image plan to set or revert them based on the + provided version and implementation values. This is a helper + routine for some common operations in the client. + """ - try: - ip.evaluate() - except apx.ConflictingActionErrors: - # Image plan evaluation can fail because of duplicate - # action discovery, but we still want to be able to - # display and log the solved FMRI changes. - self.imageplan = ip - if self.history.operation_name: - self.history.operation_end_state = \ - "Unevaluated: merged plan had errors\n" + \ - ip.get_plan(full=False) - raise - - self.imageplan = ip - - if self.history.operation_name: - self.history.operation_end_state = \ - ip.get_plan(full=False) - - def __make_plan_common(self, _op, _progtrack, _check_cancel, - _noexecute, _ip_noop=False, **kwargs): - """Private helper function to perform base plan creation and - cleanup. - """ + progtrack.plan_all_start() + + # Compute dict of changing mediators. + new_mediators = copy.deepcopy(mediators) + old_mediators = self.cfg.mediators + invalid_mediations = collections.defaultdict(dict) + for m in new_mediators.keys(): + new_values = new_mediators[m] + if not new_values: + if m not in old_mediators: + # Nothing to revert. + del new_mediators[m] + continue + + # Revert mediator to defaults. + new_mediators[m] = {} + continue + + # Validate mediator, provided version, implementation, + # and source. + valid, error = med.valid_mediator(m) + if not valid: + invalid_mediations[m]["mediator"] = (m, error) + + med_version = new_values.get("version") + if med_version: + valid, error = med.valid_mediator_version(med_version) + if valid: + new_mediators[m]["version"] = pkg.version.Version( + med_version + ) + else: + invalid_mediations[m]["version"] = (med_version, error) - if DebugValues.get_value("simulate-plan-hang"): - # If pkg5.hang file is present in image dir, then - # sleep after loading configuration until file is - # gone. This is used by the test suite for signal - # handling testing, etc. 
- hang_file = os.path.join(self.imgdir, "pkg5.hang") - with open(hang_file, "w") as f: - f.write(str(os.getpid())) + med_impl = new_values.get("implementation") + if med_impl: + valid, error = med.valid_mediator_implementation( + med_impl, allow_empty_version=True + ) + if not valid: + invalid_mediations[m]["version"] = (med_impl, error) + + if invalid_mediations: + raise apx.PlanCreationException( + invalid_mediations=invalid_mediations + ) + + self.__make_plan_common( + op, progtrack, check_cancel, noexecute, new_mediators=new_mediators + ) + + progtrack.plan_all_done() + + def make_sync_plan( + self, + op, + progtrack, + check_cancel, + noexecute, + li_pkg_updates=True, + reject_list=misc.EmptyI, + ): + """Attempt to create an appropriate image plan to bring an + image in sync with it's linked image constraints. This is a + helper routine for some common operations in the client.""" + + progtrack.plan_all_start() + + self.__make_plan_common( + op, + progtrack, + check_cancel, + noexecute, + reject_list=reject_list, + li_pkg_updates=li_pkg_updates, + ) + + progtrack.plan_all_done() + + def make_uninstall_plan( + self, + op, + progtrack, + check_cancel, + ignore_missing, + noexecute, + pkgs_to_uninstall, + ): + """Create uninstall plan to remove the specified packages.""" + + progtrack.plan_all_start() + + self.__make_plan_common( + op, + progtrack, + check_cancel, + noexecute, + ignore_missing=ignore_missing, + pkgs_to_uninstall=pkgs_to_uninstall, + ) + + progtrack.plan_all_done() + + def make_update_plan( + self, + op, + progtrack, + check_cancel, + noexecute, + ignore_missing=False, + pkgs_update=None, + reject_list=misc.EmptyI, + ): + """Create a plan to update all packages or the specific ones as + far as possible. This is a helper routine for some common + operations in the client. + """ - while os.path.exists(hang_file): - time.sleep(1) + progtrack.plan_all_start() + self.__make_plan_common( + op, + progtrack, + check_cancel, + noexecute, + ignore_missing=ignore_missing, + pkgs_update=pkgs_update, + reject_list=reject_list, + ) + progtrack.plan_all_done() + + def make_revert_plan( + self, op, progtrack, check_cancel, noexecute, args, tagged + ): + """Revert the specified files, or all files tagged as specified + in args to their manifest definitions. + """ - # Allow garbage collection of previous plan. - self.imageplan = None + progtrack.plan_all_start() + self.__make_plan_common( + op, progtrack, check_cancel, noexecute, args=args, tagged=tagged + ) + progtrack.plan_all_done() + + def make_dehydrate_plan( + self, op, progtrack, check_cancel, noexecute, publishers + ): + """Remove non-editable files and hardlinks from an image.""" + + progtrack.plan_all_start() + self.__make_plan_common( + op, progtrack, check_cancel, noexecute, publishers=publishers + ) + progtrack.plan_all_done() + + def make_rehydrate_plan( + self, op, progtrack, check_cancel, noexecute, publishers + ): + """Reinstall non-editable files and hardlinks to an dehydrated + image.""" + + progtrack.plan_all_start() + self.__make_plan_common( + op, progtrack, check_cancel, noexecute, publishers=publishers + ) + progtrack.plan_all_done() + + def make_fix_plan( + self, + op, + progtrack, + check_cancel, + noexecute, + args, + unpackaged=False, + unpackaged_only=False, + verify_paths=EmptyI, + ): + """Create an image plan to fix the image. 
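make_set_mediators_plan above expects 'mediators' to map each mediator name to a (possibly empty) dict of requested values: an empty value reverts the mediator to its default mediation, while any "version" or "implementation" entries are validated before the plan is built. A hypothetical input, with invented mediator names and versions:

# Hypothetical argument for make_set_mediators_plan; names and versions
# are invented.
mediators = {
    "mysql": {"version": "5.7"},           # pin the version only
    "vi": {"implementation": "vim"},       # pin the implementation only
    "python": {},                          # revert to the default mediation
}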
Note: verify shares + the same routine.""" + + progtrack.plan_all_start() + self.__make_plan_common( + op, + progtrack, + check_cancel, + noexecute, + args=args, + unpackaged=unpackaged, + unpackaged_only=unpackaged_only, + verify_paths=verify_paths, + ) + progtrack.plan_all_done() + + def make_noop_plan(self, op, progtrack, check_cancel, noexecute): + """Create an image plan that doesn't update the image in any + way.""" + + progtrack.plan_all_start() + self.__make_plan_common( + op, progtrack, check_cancel, noexecute, _ip_noop=True + ) + progtrack.plan_all_done() + + def ipkg_is_up_to_date( + self, check_cancel, noexecute, refresh_allowed=True, progtrack=None + ): + """Test whether the packaging system is updated to the latest + version known to be available for this image.""" - ip = imageplan.ImagePlan(self, _op, _progtrack, _check_cancel, - noexecute=_noexecute) + # + # This routine makes the distinction between the "target image", + # which will be altered, and the "running image", which is + # to say whatever image appears to contain the version of the + # pkg command we're running. + # - # Always start with most current (on-disk) state information. - self.__init_catalogs() + # + # There are two relevant cases here: + # 1) Packaging code and image we're updating are the same + # image. (i.e. 'pkg update') + # + # 2) Packaging code's image and the image we're updating are + # different (i.e. 'pkg update -R') + # + # In general, we care about getting the user to run the + # most recent packaging code available for their build. So, + # if we're not in the liveroot case, we create a new image + # which represents "/" on the system. + # + if not progtrack: + progtrack = progress.NullProgressTracker() + + img = self + + if self.__cmddir and not img.is_liveroot(): + # + # Find the path to ourselves, and use that + # as a way to locate the image we're in. It's + # not perfect-- we could be in a developer's + # workspace, for example. + # + newimg = Image( + self.__cmddir, + allow_ondisk_upgrade=False, + progtrack=progtrack, + cmdpath=self.cmdpath, + ) + useimg = True + if refresh_allowed: + # If refreshing publisher metadata is allowed, + # then perform a refresh so that a new packaging + # system package can be discovered. 
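Each of the make_*_plan helpers above follows the same shape: bracket one __make_plan_common call with progtrack.plan_all_start() and plan_all_done(). A hypothetical caller, modelled on the install-plan request that ipkg_is_up_to_date issues further down; the tracker and cancellation callback here are stand-ins:

# Hypothetical driver showing the calling convention only; 'img' is an
# Image instance and the lambda stands in for a real cancellation callback.
progtrack = progress.NullProgressTracker()
img.make_install_plan(
    pkgdefs.API_OP_INSTALL,
    progtrack,
    lambda: False,      # check_cancel stand-in
    True,               # noexecute: build the plan but do not run it
    pkgs_inst=["pkg:/package/pkg"],
)
if img.imageplan.nothingtodo():
    print("nothing to do")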
+ newimg.lock(allow_unprivileged=True) try: - try: - if _ip_noop: - ip.plan_noop(**kwargs) - elif _op in [ - pkgdefs.API_OP_ATTACH, - pkgdefs.API_OP_DETACH, - pkgdefs.API_OP_SYNC]: - ip.plan_sync(**kwargs) - elif _op in [ - pkgdefs.API_OP_CHANGE_FACET, - pkgdefs.API_OP_CHANGE_VARIANT]: - ip.plan_change_varcets(**kwargs) - elif _op == pkgdefs.API_OP_DEHYDRATE: - ip.plan_dehydrate(**kwargs) - elif _op == pkgdefs.API_OP_INSTALL: - ip.plan_install(**kwargs) - elif _op ==pkgdefs.API_OP_EXACT_INSTALL: - ip.plan_exact_install(**kwargs) - elif _op in [pkgdefs.API_OP_FIX, - pkgdefs.API_OP_VERIFY]: - ip.plan_fix(**kwargs) - elif _op == pkgdefs.API_OP_REHYDRATE: - ip.plan_rehydrate(**kwargs) - elif _op == pkgdefs.API_OP_REVERT: - ip.plan_revert(**kwargs) - elif _op == pkgdefs.API_OP_SET_MEDIATOR: - ip.plan_set_mediators(**kwargs) - elif _op == pkgdefs.API_OP_UNINSTALL: - ip.plan_uninstall(**kwargs) - elif _op == pkgdefs.API_OP_UPDATE: - ip.plan_update(**kwargs) - else: - raise RuntimeError( - "Unknown api op: {0}".format(_op)) - - except apx.ActionExecutionError as e: - raise - except pkg.actions.ActionError as e: - raise apx.InvalidPackageErrors([e]) - except apx.ApiException: - raise - try: - self.__call_imageplan_evaluate(ip) - except apx.ActionExecutionError as e: - raise - except pkg.actions.ActionError as e: - raise apx.InvalidPackageErrors([e]) + newimg.refresh_publishers(progtrack=progtrack) + except (apx.ImageFormatUpdateNeeded, apx.PermissionsException): + # Can't use the image to perform an + # update check and it would be wrong + # to prevent the operation from + # continuing in these cases. + useimg = False + except apx.CatalogRefreshException as cre: + cre.errmessage = _("pkg(7) update check failed.") + raise finally: - self.__cleanup_alt_pkg_certs() - - def make_install_plan(self, op, progtrack, check_cancel, - noexecute, pkgs_inst=None, reject_list=misc.EmptyI): - """Take a list of packages, specified in pkgs_inst, and attempt - to assemble an appropriate image plan. This is a helper - routine for some common operations in the client. - """ - - progtrack.plan_all_start() - - self.__make_plan_common(op, progtrack, check_cancel, - noexecute, pkgs_inst=pkgs_inst, - reject_list=reject_list) - - progtrack.plan_all_done() - - def make_change_varcets_plan(self, op, progtrack, check_cancel, - noexecute, facets=None, reject_list=misc.EmptyI, - variants=None): - """Take a list of variants and/or facets and attempt to - assemble an image plan which changes them. This is a helper - routine for some common operations in the client.""" - - progtrack.plan_all_start() - # compute dict of changing variants - if variants: - new = set(six.iteritems(variants)) - cur = set(six.iteritems(self.cfg.variants)) - variants = dict(new - cur) - elif facets: - new_facets = self.get_facets() - for f in facets: - if facets[f] is None: - new_facets.pop(f, None) - else: - new_facets[f] = facets[f] - facets = new_facets - - self.__make_plan_common(op, progtrack, check_cancel, - noexecute, new_variants=variants, new_facets=facets, - reject_list=reject_list) - - progtrack.plan_all_done() - - def make_set_mediators_plan(self, op, progtrack, check_cancel, - noexecute, mediators): - """Take a dictionary of mediators and attempt to assemble an - appropriate image plan to set or revert them based on the - provided version and implementation values. This is a helper - routine for some common operations in the client. - """ - - progtrack.plan_all_start() - - # Compute dict of changing mediators. 
- new_mediators = copy.deepcopy(mediators) - old_mediators = self.cfg.mediators - invalid_mediations = collections.defaultdict(dict) - for m in new_mediators.keys(): - new_values = new_mediators[m] - if not new_values: - if m not in old_mediators: - # Nothing to revert. - del new_mediators[m] - continue - - # Revert mediator to defaults. - new_mediators[m] = {} - continue - - # Validate mediator, provided version, implementation, - # and source. - valid, error = med.valid_mediator(m) - if not valid: - invalid_mediations[m]["mediator"] = (m, error) - - med_version = new_values.get("version") - if med_version: - valid, error = med.valid_mediator_version( - med_version) - if valid: - new_mediators[m]["version"] = \ - pkg.version.Version(med_version) - else: - invalid_mediations[m]["version"] = \ - (med_version, error) - - med_impl = new_values.get("implementation") - if med_impl: - valid, error = med.valid_mediator_implementation( - med_impl, allow_empty_version=True) - if not valid: - invalid_mediations[m]["version"] = \ - (med_impl, error) - - if invalid_mediations: - raise apx.PlanCreationException( - invalid_mediations=invalid_mediations) - - self.__make_plan_common(op, progtrack, check_cancel, - noexecute, new_mediators=new_mediators) - - progtrack.plan_all_done() - - def make_sync_plan(self, op, progtrack, check_cancel, - noexecute, li_pkg_updates=True, reject_list=misc.EmptyI): - """Attempt to create an appropriate image plan to bring an - image in sync with it's linked image constraints. This is a - helper routine for some common operations in the client.""" - - progtrack.plan_all_start() - - self.__make_plan_common(op, progtrack, check_cancel, - noexecute, reject_list=reject_list, - li_pkg_updates=li_pkg_updates) - - progtrack.plan_all_done() - - def make_uninstall_plan(self, op, progtrack, check_cancel, - ignore_missing, noexecute, pkgs_to_uninstall): - """Create uninstall plan to remove the specified packages.""" - - progtrack.plan_all_start() - - self.__make_plan_common(op, progtrack, check_cancel, - noexecute, ignore_missing=ignore_missing, - pkgs_to_uninstall=pkgs_to_uninstall) - - progtrack.plan_all_done() - - def make_update_plan(self, op, progtrack, check_cancel, - noexecute, ignore_missing=False, pkgs_update=None, - reject_list=misc.EmptyI): - """Create a plan to update all packages or the specific ones as - far as possible. This is a helper routine for some common - operations in the client. - """ - - progtrack.plan_all_start() - self.__make_plan_common(op, progtrack, check_cancel, - noexecute, ignore_missing=ignore_missing, - pkgs_update=pkgs_update, reject_list=reject_list) - progtrack.plan_all_done() - - def make_revert_plan(self, op, progtrack, check_cancel, - noexecute, args, tagged): - """Revert the specified files, or all files tagged as specified - in args to their manifest definitions. 
- """ - - progtrack.plan_all_start() - self.__make_plan_common(op, progtrack, check_cancel, - noexecute, args=args, tagged=tagged) - progtrack.plan_all_done() - - def make_dehydrate_plan(self, op, progtrack, check_cancel, noexecute, - publishers): - """Remove non-editable files and hardlinks from an image.""" - - progtrack.plan_all_start() - self.__make_plan_common(op, progtrack, check_cancel, - noexecute, publishers=publishers) - progtrack.plan_all_done() - - def make_rehydrate_plan(self, op, progtrack, check_cancel, noexecute, - publishers): - """Reinstall non-editable files and hardlinks to an dehydrated - image.""" - - progtrack.plan_all_start() - self.__make_plan_common(op, progtrack, check_cancel, - noexecute, publishers=publishers) - progtrack.plan_all_done() - - def make_fix_plan(self, op, progtrack, check_cancel, noexecute, args, - unpackaged=False, unpackaged_only=False, verify_paths=EmptyI): - """Create an image plan to fix the image. Note: verify shares - the same routine.""" - - progtrack.plan_all_start() - self.__make_plan_common(op, progtrack, check_cancel, noexecute, - args=args, unpackaged=unpackaged, - unpackaged_only=unpackaged_only, verify_paths=verify_paths) - progtrack.plan_all_done() - - def make_noop_plan(self, op, progtrack, check_cancel, - noexecute): - """Create an image plan that doesn't update the image in any - way.""" - - progtrack.plan_all_start() - self.__make_plan_common(op, progtrack, check_cancel, - noexecute, _ip_noop=True) - progtrack.plan_all_done() - - def ipkg_is_up_to_date(self, check_cancel, noexecute, - refresh_allowed=True, progtrack=None): - """Test whether the packaging system is updated to the latest - version known to be available for this image.""" - - # - # This routine makes the distinction between the "target image", - # which will be altered, and the "running image", which is - # to say whatever image appears to contain the version of the - # pkg command we're running. - # - - # - # There are two relevant cases here: - # 1) Packaging code and image we're updating are the same - # image. (i.e. 'pkg update') - # - # 2) Packaging code's image and the image we're updating are - # different (i.e. 'pkg update -R') - # - # In general, we care about getting the user to run the - # most recent packaging code available for their build. So, - # if we're not in the liveroot case, we create a new image - # which represents "/" on the system. - # - - if not progtrack: - progtrack = progress.NullProgressTracker() - - img = self - - if self.__cmddir and not img.is_liveroot(): - # - # Find the path to ourselves, and use that - # as a way to locate the image we're in. It's - # not perfect-- we could be in a developer's - # workspace, for example. - # - newimg = Image(self.__cmddir, - allow_ondisk_upgrade=False, progtrack=progtrack, - cmdpath=self.cmdpath) - useimg = True - if refresh_allowed: - # If refreshing publisher metadata is allowed, - # then perform a refresh so that a new packaging - # system package can be discovered. - newimg.lock(allow_unprivileged=True) - try: - newimg.refresh_publishers( - progtrack=progtrack) - except (apx.ImageFormatUpdateNeeded, - apx.PermissionsException): - # Can't use the image to perform an - # update check and it would be wrong - # to prevent the operation from - # continuing in these cases. 
- useimg = False - except apx.CatalogRefreshException as cre: - cre.errmessage = \ - _("pkg(7) update check failed.") - raise - finally: - newimg.unlock() - - if useimg: - img = newimg - - pfmri = img.get_version_installed(img.strtofmri("package/pkg")) - if not pfmri or \ - not pkgdefs.PKG_STATE_UPGRADABLE in img.get_pkg_state(pfmri): - # If no version of the package system is installed or a - # newer version isn't available, then the client is - # "up-to-date". - return True - - inc_fmri = img.get_version_installed(img.strtofmri( - "consolidation/ips/ips-incorporation")) - if inc_fmri: - # If the ips-incorporation is installed (it should be - # since package/pkg depends on it), then we can - # bypass the solver and plan evaluation if none of the - # newer versions are allowed by the incorporation. - - # Find the version at which package/pkg is incorporated. - cat = img.get_catalog(img.IMG_CATALOG_KNOWN) - inc_ver = None - for act in cat.get_entry_actions(inc_fmri, [cat.DEPENDENCY], - excludes=img.list_excludes()): - if act.name == "depend" and \ - act.attrs["type"] == "incorporate" and \ - act.attrs["fmri"].startswith("package/pkg"): - inc_ver = img.strtofmri( - act.attrs["fmri"]).version - break - - if inc_ver: - for ver, fmris in cat.fmris_by_version( - "package/pkg"): - if ver != pfmri.version and \ - ver.is_successor(inc_ver, - pkg.version.CONSTRAINT_AUTO): - break - else: - # No version is newer than installed and - # satisfied incorporation constraint. - return True - - # XXX call to progress tracker that the package is being - # refreshed - img.make_install_plan(pkgdefs.API_OP_INSTALL, progtrack, - check_cancel, noexecute, pkgs_inst=["pkg:/package/pkg"]) - - return img.imageplan.nothingtodo() - - # avoid set implementation uses json to store a set of pkg_stems - # being avoided (explicitly or implicitly), and a set of tracked stems - # that are obsolete. 
- # - # format is (version, dict((pkg stem, "avoid", "implicit-avoid" or - # "obsolete")) - - __AVOID_SET_VERSION = 1 - - def avoid_set_get(self, implicit=False): - """Return copy of avoid set""" - if implicit: - return self.__implicit_avoid_set.copy() - return self.__avoid_set.copy() - - def obsolete_set_get(self): - """Return copy of tracked obsolete pkgs""" - return self.__group_obsolete.copy() - - def __avoid_set_load(self): - """Load avoid set fron image state directory""" - state_file = os.path.join(self._statedir, "avoid_set") - self.__avoid_set = set() - self.__implicit_avoid_set = set() - self.__group_obsolete = set() - if os.path.isfile(state_file): - try: - with open(state_file) as f: - version, d = json.load(f) - except EnvironmentError as e: - raise apx._convert_error(e) - except ValueError as e: - salvaged_path = self.salvage(state_file, - full_path=True) - logger.warning("Corrupted avoid list - salvaging" - " file {state_file} in {salvaged_path}" - .format(state_file=state_file, - salvaged_path=salvaged_path)) - return - assert version == self.__AVOID_SET_VERSION - for stem in d: - if d[stem] == "avoid": - self.__avoid_set.add(stem) - elif d[stem] == "implicit-avoid": - self.__implicit_avoid_set.add(stem) - elif d[stem] == "obsolete": - self.__group_obsolete.add(stem) - else: - logger.warning("Corrupted avoid list - ignoring") - self.__avoid_set = set() - self.__implicit_avoid_set = set() - self.__group_obsolete = set() - self.__avoid_set_altered = True + newimg.unlock() + + if useimg: + img = newimg + + pfmri = img.get_version_installed(img.strtofmri("package/pkg")) + if not pfmri or not pkgdefs.PKG_STATE_UPGRADABLE in img.get_pkg_state( + pfmri + ): + # If no version of the package system is installed or a + # newer version isn't available, then the client is + # "up-to-date". + return True + + inc_fmri = img.get_version_installed( + img.strtofmri("consolidation/ips/ips-incorporation") + ) + if inc_fmri: + # If the ips-incorporation is installed (it should be + # since package/pkg depends on it), then we can + # bypass the solver and plan evaluation if none of the + # newer versions are allowed by the incorporation. + + # Find the version at which package/pkg is incorporated. 
+ cat = img.get_catalog(img.IMG_CATALOG_KNOWN) + inc_ver = None + for act in cat.get_entry_actions( + inc_fmri, [cat.DEPENDENCY], excludes=img.list_excludes() + ): + if ( + act.name == "depend" + and act.attrs["type"] == "incorporate" + and act.attrs["fmri"].startswith("package/pkg") + ): + inc_ver = img.strtofmri(act.attrs["fmri"]).version + break + + if inc_ver: + for ver, fmris in cat.fmris_by_version("package/pkg"): + if ver != pfmri.version and ver.is_successor( + inc_ver, pkg.version.CONSTRAINT_AUTO + ): + break else: - self.__avoid_set_altered = True - - def _avoid_set_save(self, new_set=None, implicit_avoid=None, - obsolete=None): - """Store avoid set to image state directory""" - if new_set is not None: - self.__avoid_set_altered = True - self.__avoid_set = new_set - - if implicit_avoid is not None: - self.__avoid_set_altered = True - self.__implicit_avoid_set = implicit_avoid - - if obsolete is not None: - self.__group_obsolete = obsolete - self.__avoid_set_altered = True - - if not self.__avoid_set_altered: - return - - - state_file = os.path.join(self._statedir, "avoid_set") - tmp_file = os.path.join(self._statedir, "avoid_set.new") - tf = open(tmp_file, "w") - - d = dict((a, "avoid") for a in self.__avoid_set) - d.update( - (a, "implicit-avoid") - for a in self.__implicit_avoid_set + # No version is newer than installed and + # satisfied incorporation constraint. + return True + + # XXX call to progress tracker that the package is being + # refreshed + img.make_install_plan( + pkgdefs.API_OP_INSTALL, + progtrack, + check_cancel, + noexecute, + pkgs_inst=["pkg:/package/pkg"], + ) + + return img.imageplan.nothingtodo() + + # avoid set implementation uses json to store a set of pkg_stems + # being avoided (explicitly or implicitly), and a set of tracked stems + # that are obsolete. + # + # format is (version, dict((pkg stem, "avoid", "implicit-avoid" or + # "obsolete")) + + __AVOID_SET_VERSION = 1 + + def avoid_set_get(self, implicit=False): + """Return copy of avoid set""" + if implicit: + return self.__implicit_avoid_set.copy() + return self.__avoid_set.copy() + + def obsolete_set_get(self): + """Return copy of tracked obsolete pkgs""" + return self.__group_obsolete.copy() + + def __avoid_set_load(self): + """Load avoid set fron image state directory""" + state_file = os.path.join(self._statedir, "avoid_set") + self.__avoid_set = set() + self.__implicit_avoid_set = set() + self.__group_obsolete = set() + if os.path.isfile(state_file): + try: + with open(state_file) as f: + version, d = json.load(f) + except EnvironmentError as e: + raise apx._convert_error(e) + except ValueError as e: + salvaged_path = self.salvage(state_file, full_path=True) + logger.warning( + "Corrupted avoid list - salvaging" + " file {state_file} in {salvaged_path}".format( + state_file=state_file, salvaged_path=salvaged_path + ) ) - d.update((a, "obsolete") for a in self.__group_obsolete) - - try: - json.dump((self.__AVOID_SET_VERSION, d), tf) - tf.close() - portable.rename(tmp_file, state_file) - except Exception as e: - logger.warning("Cannot save avoid list: {0}".format( - str(e))) - return - - self.update_last_modified() - - self.__avoid_set_altered = False - - # frozen dict implementation uses json to store a dictionary of - # pkg_stems that are frozen, the versions at which they're frozen, and - # the reason, if given, why the package was frozen. 
- # - # format is (version, dict((pkg stem, (fmri, comment, timestamp)))) - - __FROZEN_DICT_VERSION = 1 - - def get_frozen_list(self): - """Return a list of tuples containing the fmri that was frozen, - and the reason it was frozen.""" + return + assert version == self.__AVOID_SET_VERSION + for stem in d: + if d[stem] == "avoid": + self.__avoid_set.add(stem) + elif d[stem] == "implicit-avoid": + self.__implicit_avoid_set.add(stem) + elif d[stem] == "obsolete": + self.__group_obsolete.add(stem) + else: + logger.warning("Corrupted avoid list - ignoring") + self.__avoid_set = set() + self.__implicit_avoid_set = set() + self.__group_obsolete = set() + self.__avoid_set_altered = True + else: + self.__avoid_set_altered = True + + def _avoid_set_save(self, new_set=None, implicit_avoid=None, obsolete=None): + """Store avoid set to image state directory""" + if new_set is not None: + self.__avoid_set_altered = True + self.__avoid_set = new_set + + if implicit_avoid is not None: + self.__avoid_set_altered = True + self.__implicit_avoid_set = implicit_avoid + + if obsolete is not None: + self.__group_obsolete = obsolete + self.__avoid_set_altered = True + + if not self.__avoid_set_altered: + return + + state_file = os.path.join(self._statedir, "avoid_set") + tmp_file = os.path.join(self._statedir, "avoid_set.new") + tf = open(tmp_file, "w") + + d = dict((a, "avoid") for a in self.__avoid_set) + d.update((a, "implicit-avoid") for a in self.__implicit_avoid_set) + d.update((a, "obsolete") for a in self.__group_obsolete) + + try: + json.dump((self.__AVOID_SET_VERSION, d), tf) + tf.close() + portable.rename(tmp_file, state_file) + except Exception as e: + logger.warning("Cannot save avoid list: {0}".format(str(e))) + return + + self.update_last_modified() + + self.__avoid_set_altered = False + + # frozen dict implementation uses json to store a dictionary of + # pkg_stems that are frozen, the versions at which they're frozen, and + # the reason, if given, why the package was frozen. + # + # format is (version, dict((pkg stem, (fmri, comment, timestamp)))) + + __FROZEN_DICT_VERSION = 1 + + def get_frozen_list(self): + """Return a list of tuples containing the fmri that was frozen, + and the reason it was frozen.""" + + return [ + (pkg.fmri.MatchingPkgFmri(v[0]), v[1], v[2]) + for v in self.__freeze_dict_load().values() + ] + + def __freeze_dict_load(self): + """Load the dictionary containing the current state of frozen + packages.""" + + state_file = os.path.join(self._statedir, "frozen_dict") + if os.path.isfile(state_file): + try: + with open(state_file) as f: + version, d = json.load(f) + except EnvironmentError as e: + raise apx._convert_error(e) + except ValueError as e: + raise apx.InvalidFreezeFile(state_file) + if version != self.__FROZEN_DICT_VERSION: + raise apx.UnknownFreezeFileVersion( + version, self.__FROZEN_DICT_VERSION, state_file + ) + return d + return {} + + def _freeze_dict_save(self, new_dict): + """Save the dictionary of frozen packages.""" + + # Save the dictionary to disk. 
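The comments above describe the two JSON state files kept in the image state directory: avoid_set holds (version, {stem: "avoid" | "implicit-avoid" | "obsolete"}) and frozen_dict holds (version, {stem: (fmri, comment, timestamp)}). Roughly what each payload looks like when dumped; the stems, FMRI string and comment below are invented:

import calendar
import json
import time

# Invented example payloads in the (version, dict) layouts described above.
avoid_payload = (1, {
    "web/server/nginx": "avoid",
    "library/libfoo": "implicit-avoid",
    "runtime/oldlang": "obsolete",
})

frozen_payload = (1, {
    "web/server/nginx": (
        "pkg:/web/server/nginx@1.24.0",         # str() of the frozen FMRI
        "hold until local config is migrated",  # comment, may be None
        calendar.timegm(time.gmtime()),         # freeze timestamp
    ),
})

print(json.dumps(avoid_payload))
print(json.dumps(frozen_payload))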
+ state_file = os.path.join(self._statedir, "frozen_dict") + tmp_file = os.path.join(self._statedir, "frozen_dict.new") + + try: + with open(tmp_file, "w") as tf: + json.dump((self.__FROZEN_DICT_VERSION, new_dict), tf) + portable.rename(tmp_file, state_file) + self.update_last_modified() + except EnvironmentError as e: + raise apx._convert_error(e) + self.__rebuild_image_catalogs() + + @staticmethod + def get_dehydrated_exclude_func(dehydrated_pubs): + """A boolean function that will be added to the pkg(7) exclude + mechanism to determine if an action is allowed to be installed + based on whether its publisher is going to be dehydrated or has + been currently dehydrated.""" + + # A closure is used so that the list of dehydrated publishers + # can be accessed. + def __allow_action_dehydrate(act, publisher): + if publisher not in dehydrated_pubs: + # Allow actions from publishers that are not + # dehydrated. + return True - return [ - (pkg.fmri.MatchingPkgFmri(v[0]), v[1], v[2]) - for v in self.__freeze_dict_load().values() - ] + aname = act.name + if aname == "file": + attrs = act.attrs + if attrs.get("dehydrate") == "false": + return True + if "preserve" in attrs or "overlay" in attrs: + return True + return False + elif aname == "hardlink": + return False - def __freeze_dict_load(self): - """Load the dictionary containing the current state of frozen - packages.""" + return True - state_file = os.path.join(self._statedir, "frozen_dict") - if os.path.isfile(state_file): - try: - with open(state_file) as f: - version, d = json.load(f) - except EnvironmentError as e: - raise apx._convert_error(e) - except ValueError as e: - raise apx.InvalidFreezeFile(state_file) - if version != self.__FROZEN_DICT_VERSION: - raise apx.UnknownFreezeFileVersion( - version, self.__FROZEN_DICT_VERSION, - state_file) - return d - return {} - - def _freeze_dict_save(self, new_dict): - """Save the dictionary of frozen packages.""" - - # Save the dictionary to disk. - state_file = os.path.join(self._statedir, "frozen_dict") - tmp_file = os.path.join(self._statedir, "frozen_dict.new") + return __allow_action_dehydrate - try: - with open(tmp_file, "w") as tf: - json.dump( - (self.__FROZEN_DICT_VERSION, new_dict), tf) - portable.rename(tmp_file, state_file) - self.update_last_modified() - except EnvironmentError as e: - raise apx._convert_error(e) - self.__rebuild_image_catalogs() - - @staticmethod - def get_dehydrated_exclude_func(dehydrated_pubs): - """A boolean function that will be added to the pkg(7) exclude - mechanism to determine if an action is allowed to be installed - based on whether its publisher is going to be dehydrated or has - been currently dehydrated.""" - - # A closure is used so that the list of dehydrated publishers - # can be accessed. - def __allow_action_dehydrate(act, publisher): - if publisher not in dehydrated_pubs: - # Allow actions from publishers that are not - # dehydrated. 
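get_dehydrated_exclude_func above returns a closure that decides which actions a dehydrated publisher may still deliver: files marked dehydrate=false or carrying preserve/overlay stay, other files and all hardlinks are dropped, and every other action type passes. A standalone transcription of that decision, exercised with a stub action; publisher names and paths are invented:

# Stand-in for a pkg action: the predicate only looks at .name and .attrs.
class StubAction:
    def __init__(self, name, **attrs):
        self.name = name
        self.attrs = attrs

def allow_when_dehydrated(act, publisher, dehydrated_pubs):
    # Transcription of __allow_action_dehydrate above, for illustration.
    if publisher not in dehydrated_pubs:
        return True
    if act.name == "file":
        if act.attrs.get("dehydrate") == "false":
            return True
        if "preserve" in act.attrs or "overlay" in act.attrs:
            return True
        return False
    if act.name == "hardlink":
        return False
    return True

pubs = {"extra.example"}
print(allow_when_dehydrated(
    StubAction("file", path="usr/bin/foo"), "extra.example", pubs))      # False
print(allow_when_dehydrated(
    StubAction("file", path="etc/foo", preserve="true"),
    "extra.example", pubs))                                              # True
print(allow_when_dehydrated(
    StubAction("hardlink", path="usr/bin/bar"), "extra.example", pubs))  # False
print(allow_when_dehydrated(
    StubAction("dir", path="usr"), "example", pubs))                     # True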
- return True - - aname = act.name - if aname == "file": - attrs = act.attrs - if attrs.get("dehydrate") == "false": - return True - if "preserve" in attrs or "overlay" in attrs: - return True - return False - elif aname == "hardlink": - return False - - return True - - return __allow_action_dehydrate # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/imageconfig.py b/src/modules/client/imageconfig.py index 56b9cba09..691a9a77c 100644 --- a/src/modules/client/imageconfig.py +++ b/src/modules/client/imageconfig.py @@ -47,11 +47,13 @@ import pkg.variant as variant from pkg.client import global_settings + logger = global_settings.logger from pkg.misc import DictProperty, SIGNATURE_POLICY from pkg.client.debugvalues import DebugValues from pkg.client.transport.exception import TransportFailures + # The default_policies dictionary defines the policies that are supported by # pkg(7) and their default values. Calls to the ImageConfig.get_policy method # should use the constants defined here. @@ -86,18 +88,18 @@ } default_policy_map = { - BE_POLICY: { "default": "create-backup" }, - CONTENT_UPDATE_POLICY: { "default": "always" }, + BE_POLICY: {"default": "create-backup"}, + CONTENT_UPDATE_POLICY: {"default": "always"}, } CA_PATH = "ca-path" # Default CA_PATH is /etc/ssl/certs default_properties = { - CA_PATH: os.path.join(os.path.sep, "etc", "ssl", "certs"), - # Path default is intentionally relative for this case. - "trust-anchor-directory": os.path.join("etc", "ssl", "pkg"), - DEFAULT_CONCURRENCY: 1, - AUTO_BE_NAME: "omnios-r%r", + CA_PATH: os.path.join(os.path.sep, "etc", "ssl", "certs"), + # Path default is intentionally relative for this case. + "trust-anchor-directory": os.path.join("etc", "ssl", "pkg"), + DEFAULT_CONCURRENCY: 1, + AUTO_BE_NAME: "omnios-r%r", } # Assume the repository metadata should be checked no more than once every @@ -110,140 +112,213 @@ # Token used for default values. DEF_TOKEN = "DEFAULT" -_val_map_none = { "None": None } +_val_map_none = {"None": None} CURRENT_VERSION = 3 + class ImageConfig(cfg.FileConfig): - """An ImageConfig object is a collection of configuration information: - URLs, publishers, properties, etc. that allow an Image to operate.""" - - # This dictionary defines the set of default properties and property - # groups for an image configuration indexed by version. - __defs = { - 2: [ - cfg.PropertySection("filter", properties=[]), - cfg.PropertySection("image", properties=[ + """An ImageConfig object is a collection of configuration information: + URLs, publishers, properties, etc. that allow an Image to operate.""" + + # This dictionary defines the set of default properties and property + # groups for an image configuration indexed by version. 
+ __defs = { + 2: [ + cfg.PropertySection("filter", properties=[]), + cfg.PropertySection( + "image", + properties=[ cfg.PropInt("version"), - ]), - cfg.PropertySection("property", properties=[ + ], + ), + cfg.PropertySection( + "property", + properties=[ cfg.PropList("publisher-search-order"), cfg.PropPublisher("preferred-authority"), cfg.PropBool("display-coprights", default=True), cfg.PropBool("require-optional", default=False), cfg.PropBool("pursue-latest", default=True), - cfg.PropBool(FLUSH_CONTENT_CACHE, - default=default_policies[FLUSH_CONTENT_CACHE]), - cfg.PropBool(SEND_UUID, - default=default_policies[SEND_UUID]), - ]), - cfg.PropertySection("variant", properties=[]), - cfg.PropertySectionTemplate("^authority_.*", properties=[ + cfg.PropBool( + FLUSH_CONTENT_CACHE, + default=default_policies[FLUSH_CONTENT_CACHE], + ), + cfg.PropBool( + SEND_UUID, default=default_policies[SEND_UUID] + ), + ], + ), + cfg.PropertySection("variant", properties=[]), + cfg.PropertySectionTemplate( + "^authority_.*", + properties=[ # Base publisher information. cfg.PropPublisher("alias", value_map=_val_map_none), cfg.PropPublisher("prefix", value_map=_val_map_none), cfg.PropBool("disabled"), cfg.PropUUID("uuid", value_map=_val_map_none), # Publisher transport information. - cfg.PropPubURIList("mirrors", - value_map=_val_map_none), + cfg.PropPubURIList("mirrors", value_map=_val_map_none), cfg.PropPubURI("origin", value_map=_val_map_none), cfg.Property("ssl_cert", value_map=_val_map_none), cfg.Property("ssl_key", value_map=_val_map_none), # Publisher repository metadata. - cfg.PropDefined("repo.collection_type", ["core", - "supplemental"], default="core", - value_map=_val_map_none), - cfg.PropDefined("repo.description", - value_map=_val_map_none), + cfg.PropDefined( + "repo.collection_type", + ["core", "supplemental"], + default="core", + value_map=_val_map_none, + ), + cfg.PropDefined( + "repo.description", value_map=_val_map_none + ), cfg.PropList("repo.legal_uris", value_map=_val_map_none), - cfg.PropDefined("repo.name", default="package repository", - value_map=_val_map_none), + cfg.PropDefined( + "repo.name", + default="package repository", + value_map=_val_map_none, + ), # Must be a string so "" can be stored. - cfg.Property("repo.refresh_seconds", + cfg.Property( + "repo.refresh_seconds", default=str(REPO_REFRESH_SECONDS_DEFAULT), - value_map=_val_map_none), + value_map=_val_map_none, + ), cfg.PropBool("repo.registered", value_map=_val_map_none), - cfg.Property("repo.registration_uri", - value_map=_val_map_none), - cfg.PropList("repo.related_uris", - value_map=_val_map_none), + cfg.Property( + "repo.registration_uri", value_map=_val_map_none + ), + cfg.PropList("repo.related_uris", value_map=_val_map_none), cfg.Property("repo.sort_policy", value_map=_val_map_none), - ]), - ], - 3: [ - cfg.PropertySection("image", properties=[ + ], + ), + ], + 3: [ + cfg.PropertySection( + "image", + properties=[ cfg.PropInt("version"), - ]), - # The preferred-authority property should be removed from - # version 4 of image config. - cfg.PropertySection("property", properties=[ + ], + ), + # The preferred-authority property should be removed from + # version 4 of image config. 
+ cfg.PropertySection( + "property", + properties=[ cfg.PropPublisher("preferred-authority"), cfg.PropList("publisher-search-order"), - cfg.PropDefined(BE_POLICY, allowed=["default", - "always-new", "create-backup", "when-required"], - default=default_policies[BE_POLICY]), - cfg.PropDefined(CONTENT_UPDATE_POLICY, allowed=["default", - "always", "when-required"], - default=default_policies[CONTENT_UPDATE_POLICY]), - cfg.PropBool(FLUSH_CONTENT_CACHE, - default=default_policies[FLUSH_CONTENT_CACHE]), - cfg.PropBool(MIRROR_DISCOVERY, - default=default_policies[MIRROR_DISCOVERY]), - cfg.PropBool(SEND_UUID, - default=default_policies[SEND_UUID]), - cfg.PropDefined(SIGNATURE_POLICY, + cfg.PropDefined( + BE_POLICY, + allowed=[ + "default", + "always-new", + "create-backup", + "when-required", + ], + default=default_policies[BE_POLICY], + ), + cfg.PropDefined( + CONTENT_UPDATE_POLICY, + allowed=["default", "always", "when-required"], + default=default_policies[CONTENT_UPDATE_POLICY], + ), + cfg.PropBool( + FLUSH_CONTENT_CACHE, + default=default_policies[FLUSH_CONTENT_CACHE], + ), + cfg.PropBool( + MIRROR_DISCOVERY, + default=default_policies[MIRROR_DISCOVERY], + ), + cfg.PropBool( + SEND_UUID, default=default_policies[SEND_UUID] + ), + cfg.PropDefined( + SIGNATURE_POLICY, allowed=list(sigpolicy.Policy.policies()) + [DEF_TOKEN], - default=DEF_TOKEN), - cfg.PropBool(USE_SYSTEM_REPO, - default=default_policies[USE_SYSTEM_REPO]), - cfg.Property(CA_PATH, - default=default_properties[CA_PATH]), - cfg.Property("trust-anchor-directory", - default=DEF_TOKEN), + default=DEF_TOKEN, + ), + cfg.PropBool( + USE_SYSTEM_REPO, + default=default_policies[USE_SYSTEM_REPO], + ), + cfg.Property(CA_PATH, default=default_properties[CA_PATH]), + cfg.Property("trust-anchor-directory", default=DEF_TOKEN), cfg.PropList("signature-required-names"), - cfg.Property(CHECK_CERTIFICATE_REVOCATION, - default=default_policies[ - CHECK_CERTIFICATE_REVOCATION]), + cfg.Property( + CHECK_CERTIFICATE_REVOCATION, + default=default_policies[CHECK_CERTIFICATE_REVOCATION], + ), cfg.PropList("dehydrated"), cfg.PropList(EXCLUDE_PATTERNS), - cfg.PropDefined(EXCLUDE_POLICY, + cfg.PropDefined( + EXCLUDE_POLICY, allowed=["ignore", "warn", "reject"], - default=default_policies[EXCLUDE_POLICY]), + default=default_policies[EXCLUDE_POLICY], + ), cfg.PropList(KEY_FILES), - cfg.PropBool(DEFAULT_RECURSE, - default=default_policies[DEFAULT_RECURSE]), - cfg.PropInt(DEFAULT_CONCURRENCY, + cfg.PropBool( + DEFAULT_RECURSE, + default=default_policies[DEFAULT_RECURSE], + ), + cfg.PropInt( + DEFAULT_CONCURRENCY, minimum=0, - default=default_properties[DEFAULT_CONCURRENCY]), - cfg.Property(AUTO_BE_NAME, + default=default_properties[DEFAULT_CONCURRENCY], + ), + cfg.Property( + AUTO_BE_NAME, default=default_properties[AUTO_BE_NAME], - value_map=_val_map_none), - cfg.PropBool(TEMP_BE_ACTIVATION, - default=default_policies[TEMP_BE_ACTIVATION]), - ]), - cfg.PropertySection("facet", properties=[ + value_map=_val_map_none, + ), + cfg.PropBool( + TEMP_BE_ACTIVATION, + default=default_policies[TEMP_BE_ACTIVATION], + ), + ], + ), + cfg.PropertySection( + "facet", + properties=[ cfg.PropertyTemplate(r"^facet\..*", prop_type=cfg.PropBool), - ]), - cfg.PropertySection("inherited_facet", properties=[ + ], + ), + cfg.PropertySection( + "inherited_facet", + properties=[ cfg.PropertyTemplate(r"^facet\..*", prop_type=cfg.PropBool), - ]), - cfg.PropertySection("mediators", properties=[ + ], + ), + cfg.PropertySection( + "mediators", + properties=[ 
cfg.PropertyTemplate(r"^[A-Za-z0-9\-]+\.implementation$"), - cfg.PropertyTemplate(r"^[A-Za-z0-9\-]+\.implementation-version$", - prop_type=cfg.PropVersion), - cfg.PropertyTemplate(r"^[A-Za-z0-9\-]+\.implementation-source$", - prop_type=cfg.PropDefined, allowed=["site", "vendor", - "local", "system"], default="local"), - cfg.PropertyTemplate(r"^[A-Za-z0-9\-]+\.version$", - prop_type=cfg.PropVersion), - cfg.PropertyTemplate(r"^[A-Za-z0-9\-]+\.version-source$", - prop_type=cfg.PropDefined, allowed=["site", "vendor", - "local", "system"], default="local"), - ]), - cfg.PropertySection("variant", properties=[]), - - cfg.PropertySectionTemplate("^authority_.*", properties=[ + cfg.PropertyTemplate( + r"^[A-Za-z0-9\-]+\.implementation-version$", + prop_type=cfg.PropVersion, + ), + cfg.PropertyTemplate( + r"^[A-Za-z0-9\-]+\.implementation-source$", + prop_type=cfg.PropDefined, + allowed=["site", "vendor", "local", "system"], + default="local", + ), + cfg.PropertyTemplate( + r"^[A-Za-z0-9\-]+\.version$", prop_type=cfg.PropVersion + ), + cfg.PropertyTemplate( + r"^[A-Za-z0-9\-]+\.version-source$", + prop_type=cfg.PropDefined, + allowed=["site", "vendor", "local", "system"], + default="local", + ), + ], + ), + cfg.PropertySection("variant", properties=[]), + cfg.PropertySectionTemplate( + "^authority_.*", + properties=[ # Base publisher information. cfg.PropPublisher("alias", value_map=_val_map_none), cfg.PropPublisher("prefix", value_map=_val_map_none), @@ -252,1401 +327,1483 @@ class ImageConfig(cfg.FileConfig): cfg.PropUUID("uuid", value_map=_val_map_none), cfg.Property("last_uuid", value_map=_val_map_none), # Publisher transport information. - cfg.PropPubURIList("mirrors", - value_map=_val_map_none), + cfg.PropPubURIList("mirrors", value_map=_val_map_none), # extended information about mirrors - cfg.PropPubURIDictionaryList("mirror_info", - value_map=_val_map_none), + cfg.PropPubURIDictionaryList( + "mirror_info", value_map=_val_map_none + ), cfg.PropPubURI("origin", value_map=_val_map_none), - cfg.PropPubURIList("origins", - value_map=_val_map_none), + cfg.PropPubURIList("origins", value_map=_val_map_none), # extended information about origins - cfg.PropPubURIDictionaryList("origin_info", - value_map=_val_map_none), + cfg.PropPubURIDictionaryList( + "origin_info", value_map=_val_map_none + ), # when keys/certs can be set per-origin/mirror, these # should move into origin_info/mirror_info cfg.Property("ssl_cert", value_map=_val_map_none), cfg.Property("ssl_key", value_map=_val_map_none), # Publisher signing information. - cfg.PropDefined("property.{0}".format(SIGNATURE_POLICY), + cfg.PropDefined( + "property.{0}".format(SIGNATURE_POLICY), allowed=list(sigpolicy.Policy.policies()) + [DEF_TOKEN], - default=DEF_TOKEN), + default=DEF_TOKEN, + ), cfg.PropList("property.signature-required-names"), cfg.PropList("intermediate_certs"), cfg.PropList("approved_ca_certs"), cfg.PropList("revoked_ca_certs"), cfg.PropList("signing_ca_certs"), # Publisher repository metadata. 
- cfg.PropDefined("repo.collection_type", ["core", - "supplemental"], default="core", - value_map=_val_map_none), - cfg.PropDefined("repo.description", - value_map=_val_map_none), + cfg.PropDefined( + "repo.collection_type", + ["core", "supplemental"], + default="core", + value_map=_val_map_none, + ), + cfg.PropDefined( + "repo.description", value_map=_val_map_none + ), cfg.PropList("repo.legal_uris", value_map=_val_map_none), - cfg.PropDefined("repo.name", default="package repository", - value_map=_val_map_none), + cfg.PropDefined( + "repo.name", + default="package repository", + value_map=_val_map_none, + ), # Must be a string so "" can be stored. - cfg.Property("repo.refresh_seconds", + cfg.Property( + "repo.refresh_seconds", default=str(REPO_REFRESH_SECONDS_DEFAULT), - value_map=_val_map_none), + value_map=_val_map_none, + ), cfg.PropBool("repo.registered", value_map=_val_map_none), - cfg.Property("repo.registration_uri", - value_map=_val_map_none), - cfg.PropList("repo.related_uris", - value_map=_val_map_none), + cfg.Property( + "repo.registration_uri", value_map=_val_map_none + ), + cfg.PropList("repo.related_uris", value_map=_val_map_none), cfg.Property("repo.sort_policy", value_map=_val_map_none), - ]), - cfg.PropertySectionTemplate("^linked_.*", properties=[ + ], + ), + cfg.PropertySectionTemplate( + "^linked_.*", + properties=[ cfg.Property(li.PROP_NAME, value_map=_val_map_none), cfg.Property(li.PROP_PATH, value_map=_val_map_none), cfg.PropBool(li.PROP_RECURSE, default=True), - ]), - ], - } - - def __init__(self, cfgpathname, imgroot, overrides=misc.EmptyDict, - version=None, sysrepo_proxy=False): - """ - 'write_sysrepo_proxy' is a boolean, set to 'True' if this - ImageConfig should write the special publisher.SYSREPO_PROXY - token to the backing FileConfig in place of any actual proxies - used at runtime.""" - self.__imgroot = imgroot - self.__publishers = {} - self.__validate = False - self.facets = facet.Facets() - self.mediators = {} - self.variants = variant.Variants() - self.linked_children = {} - self.write_sysrepo_proxy = sysrepo_proxy - cfg.FileConfig.__init__(self, cfgpathname, - definitions=self.__defs, overrides=overrides, - version=version) - - def __str__(self): - return "{0}\n{1}".format(self.__publishers, self.__defs) - - def remove_publisher(self, prefix): - """External functional interface - use property interface""" - del self.publishers[prefix] - - def change_publisher_search_order(self, being_moved, staying_put, - after): - """Change the publisher search order by moving the publisher - 'being_moved' relative to the publisher 'staying put.' 
The - boolean 'after' determins whether 'being_moved' is placed before - or after 'staying_put'.""" - - so = self.get_property("property", "publisher-search-order") - so.remove(being_moved) + ], + ), + ], + } + + def __init__( + self, + cfgpathname, + imgroot, + overrides=misc.EmptyDict, + version=None, + sysrepo_proxy=False, + ): + """ + 'write_sysrepo_proxy' is a boolean, set to 'True' if this + ImageConfig should write the special publisher.SYSREPO_PROXY + token to the backing FileConfig in place of any actual proxies + used at runtime.""" + self.__imgroot = imgroot + self.__publishers = {} + self.__validate = False + self.facets = facet.Facets() + self.mediators = {} + self.variants = variant.Variants() + self.linked_children = {} + self.write_sysrepo_proxy = sysrepo_proxy + cfg.FileConfig.__init__( + self, + cfgpathname, + definitions=self.__defs, + overrides=overrides, + version=version, + ) + + def __str__(self): + return "{0}\n{1}".format(self.__publishers, self.__defs) + + def remove_publisher(self, prefix): + """External functional interface - use property interface""" + del self.publishers[prefix] + + def change_publisher_search_order(self, being_moved, staying_put, after): + """Change the publisher search order by moving the publisher + 'being_moved' relative to the publisher 'staying put.' The + boolean 'after' determins whether 'being_moved' is placed before + or after 'staying_put'.""" + + so = self.get_property("property", "publisher-search-order") + so.remove(being_moved) + try: + ind = so.index(staying_put) + except ValueError: + raise apx.MoveRelativeToUnknown(staying_put) + if after: + so.insert(ind + 1, being_moved) + else: + so.insert(ind, being_moved) + self.set_property("property", "publisher-search-order", so) + + def __get_publisher(self, prefix): + """Accessor method for publishers dictionary""" + return self.__publishers[prefix] + + def __set_publisher(self, prefix, pubobj): + """Accessor method to keep search order correct on insert""" + pval = self.get_property("property", "publisher-search-order") + if prefix not in pval: + self.add_property_value( + "property", "publisher-search-order", prefix + ) + self.__publishers[prefix] = pubobj + + def __del_publisher(self, prefix): + """Accessor method for publishers""" + pval = self.get_property("property", "publisher-search-order") + if prefix in pval: + self.remove_property_value( + "property", "publisher-search-order", prefix + ) + try: + self.remove_section("authority_{0}".format(prefix)) + except cfg.UnknownSectionError: + pass + del self.__publishers[prefix] + + def __publisher_iter(self): + return self.__publishers.__iter__() + + def __publisher_iteritems(self): + """Support iteritems on publishers""" + return six.iteritems(self.__publishers) + + def __publisher_keys(self): + """Support keys() on publishers""" + return list(self.__publishers.keys()) + + def __publisher_values(self): + """Support values() on publishers""" + return list(self.__publishers.values()) + + def get_policy(self, policy): + """Return a boolean value for the named policy. Returns + the default value for the policy if the named policy is + not defined in the image configuration. + """ + return str(self.get_policy_str(policy)).lower() in ("true", "yes") + + def get_policy_str(self, policy): + """Return the string value for the named policy. Returns + the default value for the policy if the named policy is + not defined in the image configuration. 
+ """ + assert policy in default_policies + + prop = self.get_property("property", policy) + + # If requested policy has a default mapping in + # default_policy_map, we substitute the correct value if it's + # still set to 'default'. + if policy in default_policy_map and prop == default_policies[policy]: + return default_policy_map[policy][default_policies[policy]] + + return prop + + def get_property(self, section, name): + """Returns the value of the property object matching the given + section and name. Raises UnknownPropertyError if it does not + exist. + """ + rval = cfg.FileConfig.get_property(self, section, name) + if name in default_policies and rval == DEF_TOKEN: + return default_policies[name] + if name in default_properties and rval == DEF_TOKEN: + return default_properties[name] + return rval + + def reset(self, overrides=misc.EmptyDict): + """Discards current configuration state and returns the + configuration object to its initial state. + + 'overrides' is an optional dictionary of property values indexed + by section name and property name. If provided, it will be used + to override any default values initially assigned during reset. + """ + + # Set __validate to be False so that the order the properties + # are set here doesn't matter. + self.__validate = False + + # Allow parent class to populate property data first. + cfg.FileConfig.reset(self, overrides=overrides) + + # + # Now transform property data as needed and populate image + # configuration data structures. + # + + # Must load variants first, since in the case of zones, the + # variant can impact the processing of publishers. (Notably, + # how ssl cert and key paths are interpreted.) + idx = self.get_index() + self.variants.update(idx.get("variant", {})) + # Variants and facets are encoded so they can contain + # '/' characters. + for k, v in six.iteritems(idx.get("variant", {})): + # convert variant name from unicode to a string + self.variants[str(unquote(k))] = v + for k, v in six.iteritems(idx.get("facet", {})): + # convert facet name from unicode to a string + self.facets[str(unquote(k))] = v + for k, v in six.iteritems(idx.get("inherited_facet", {})): + # convert facet name from unicode to a string + self.facets._set_inherited(str(unquote(k)), v) + + # Ensure architecture and zone variants are defined. + if "variant.arch" not in self.variants: + self.variants["variant.arch"] = platform.processor() + if "variant.opensolaris.zone" not in self.variants: + self.variants["variant.opensolaris.zone"] = "global" + # Ensure imagetype variant is defined + if "variant.opensolaris.imagetype" not in self.variants: + self.variants["variant.opensolaris.imagetype"] = "full" + + # load linked image child properties + for s, v in six.iteritems(idx): + if not re.match("linked_.*", s): + continue + linked_props = self.read_linked(s, v) + if linked_props: + lin = linked_props[li.PROP_NAME] + assert lin not in self.linked_children + self.linked_children[lin] = linked_props + + # Merge disabled publisher file with configuration; the DA_FILE + # is used for compatibility with older clients. + dafile = os.path.join(os.path.dirname(self.target), DA_FILE) + if os.path.exists(dafile): + # Merge disabled publisher configuration data. + disabled_cfg = cfg.FileConfig( + dafile, definitions=self.__defs, version=self.version + ) + for s in disabled_cfg.get_sections(): + if s.name.startswith("authority_"): + self.add_section(s) + + # Get updated configuration index. 
+ idx = self.get_index() + + # Sort the index so that the prefixes are added to the list + # "publisher-search-order" in alphabetic order. + for s, v in collections.OrderedDict(sorted(six.iteritems(idx))).items(): + if re.match("authority_.*", s): + k, a = self.read_publisher(s, v) + # this will call __set_publisher and add the + # prefix to "publisher-search-order". + self.publishers[k] = a + + # Move any properties found in policy section (from older + # images) to the property section. + for k, v in six.iteritems(idx.get("policy", {})): + self.set_property("property", k, v) + self.remove_property("policy", k) + + # Setup defaults for properties that have no value. + if not self.get_property("property", CA_PATH): + self.set_property("property", CA_PATH, default_properties[CA_PATH]) + + pso = self.get_property("property", "publisher-search-order") + # Ensure that all configured publishers are present in + # search order (add them in alpha order to the end). + # Also ensure that all publishers in search order that + # are not known are removed. + known_pubs = set(self.__publishers.keys()) + sorted_pubs = set(pso) + new_pubs = known_pubs - sorted_pubs + old_pubs = sorted_pubs - known_pubs + for pub in old_pubs: + pso.remove(pub) + pso.extend(sorted(new_pubs)) + self.set_property("property", "publisher-search-order", pso) + + # Load mediator data. + for entry, value in six.iteritems(idx.get("mediators", {})): + mname, mtype = entry.rsplit(".", 1) + # convert mediator name+type from unicode to a string + mname = str(mname) + mtype = str(mtype) + self.mediators.setdefault(mname, {})[mtype] = value + + # Now re-enable validation and validate the properties. + self.__validate = True + self.__validate_properties() + + # Finally, attempt to write configuration again to ensure + # changes are reflected on-disk -- but only if the version + # matches most current. + if self.version == CURRENT_VERSION: + self.write(ignore_unprivileged=True) + + def set_property(self, section, name, value): + """Sets the value of the property object matching the given + section and name. If the section or property does not already + exist, it will be added. Raises InvalidPropertyValueError if + the value is not valid for the given property.""" + + cfg.FileConfig.set_property(self, section, name, value) + + if self.__validate: + self.__validate_properties() + + def set_properties(self, properties): + """Sets the values of the property objects matching those found + in the provided dictionary. If any section or property does not + already exist, it will be added. An InvalidPropertyValueError + will be raised if the value is not valid for the given + properties. + + 'properties' should be a dictionary of dictionaries indexed by + section and then by property name. As an example: + + { + 'section': { + 'property': value + } + } + """ + + # Validation must be delayed until after all properties are set, + # in case some properties are interdependent for validation. + self.__validate = False + try: + cfg.FileConfig.set_properties(self, properties) + finally: + # Ensure validation is re-enabled even if an exception + # is raised. + self.__validate = True + + self.__validate_properties() + + def write(self, ignore_unprivileged=False): + """Write the image configuration.""" + + # The variant, facet, and mediator sections must be removed so + # the private copies can be transferred to the configuration + # object. 
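Facet and variant names can legitimately contain '/' characters, which is why reset() above reads them back through unquote() and why write() below stores them with quote(f, ""). A minimal round-trip sketch, standalone and with an invented facet name (the module itself pulls quote/unquote from its own imports):

    from urllib.parse import quote, unquote

    name = "facet.locale/de"          # illustrative facet name
    stored = quote(name, "")          # 'facet.locale%2Fde'; '/' is encoded
    assert unquote(stored) == name    # reset() recovers the original name
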
+ try: + self.remove_section("variant") + except cfg.UnknownSectionError: + pass + for f in self.variants: + self.set_property("variant", quote(f, ""), self.variants[f]) + + try: + self.remove_section("facet") + except cfg.UnknownSectionError: + pass + # save local facets + for f in self.facets.local: + self.set_property("facet", quote(f, ""), self.facets.local[f]) + + try: + self.remove_section("inherited_facet") + except cfg.UnknownSectionError: + pass + # save inherited facets + for f in self.facets.inherited: + self.set_property( + "inherited_facet", quote(f, ""), self.facets.inherited[f] + ) + + try: + self.remove_section("mediators") + except cfg.UnknownSectionError: + pass + for mname, mvalues in six.iteritems(self.mediators): + for mtype, mvalue in six.iteritems(mvalues): + # name.implementation[-(source|version)] + # name.version[-source] + pname = mname + "." + mtype + self.set_property("mediators", pname, mvalue) + + # remove all linked image child configuration + idx = self.get_index() + for s, v in six.iteritems(idx): + if not re.match("linked_.*", s): + continue + self.remove_section(s) + + # add sections for any known linked children + for lin in sorted(self.linked_children): + linked_props = self.linked_children[lin] + s = "linked_{0}".format(str(lin)) + for k in [li.PROP_NAME, li.PROP_PATH, li.PROP_RECURSE]: + self.set_property(s, k, str(linked_props[k])) + + # Transfer current publisher information to configuration. + for prefix in self.__publishers: + pub = self.__publishers[prefix] + section = "authority_{0}".format(pub.prefix) + + for prop in ( + "alias", + "prefix", + "approved_ca_certs", + "revoked_ca_certs", + "disabled", + "sticky", + ): + self.set_property(section, prop, getattr(pub, prop)) + + # Force removal of origin property when writing. It + # should only exist when configuration is loaded if + # the client is using an older image. + try: + self.remove_property(section, "origin") + except cfg.UnknownPropertyError: + # Already gone. + pass + + # Store SSL Cert and Key data. + repo = pub.repository + p = "" + for o in repo.origins: + if o.ssl_key: + p = str(o.ssl_key) + break + self.set_property(section, "ssl_key", p) + + p = "" + for o in repo.origins: + if o.ssl_cert: + p = str(o.ssl_cert) + break + self.set_property(section, "ssl_cert", p) + + # Store per-origin/mirror information. For now, this + # information is limited to the proxy used for each URI. + for repouri_list, prop_name in [ + (repo.origins, "origin_info"), + (repo.mirrors, "mirror_info"), + ]: + plist = [] + for r in repouri_list: + # Convert boolean value into string for + # json. + r_disabled = "false" + if r.disabled: + r_disabled = "true" + if not r.proxies: + plist.append( + {"uri": r.uri, "proxy": "", "disabled": r_disabled} + ) + continue + for p in r.proxies: + # sys_cfg proxy values should + # always be '' so that + # if the system-repository port + # is changed, that gets picked + # up in the image. See + # read_publisher(..) and + # BlendedConfig.__merge_publishers + if self.write_sysrepo_proxy: + puri = publisher.SYSREPO_PROXY + elif not p: + # we do not want None + # stringified to 'None' + puri = "" + else: + puri = p.uri + plist.append( + { + "uri": r.uri, + "proxy": puri, + "disabled": r_disabled, + } + ) + + self.set_property(section, prop_name, str(plist)) + + # Store publisher UUID. 
+ self.set_property(section, "uuid", pub.client_uuid) + self.set_property(section, "last_uuid", pub.client_uuid_time) + + # Write repository origins and mirrors, ensuring the + # list contains unique values. We must check for + # uniqueness manually, rather than using a set() + # to properly preserve the order in which origins and + # mirrors are set. + for prop in ("origins", "mirrors"): + pval = [str(v) for v in getattr(repo, prop)] + values = set() + unique_pval = [] + for item in pval: + if item not in values: + values.add(item) + unique_pval.append(item) + + self.set_property(section, prop, unique_pval) + + for prop in ( + "collection_type", + "description", + "legal_uris", + "name", + "refresh_seconds", + "registered", + "registration_uri", + "related_uris", + "sort_policy", + ): + pval = getattr(repo, prop) + if isinstance(pval, list): + # Stringify lists of objects; this + # assumes the underlying objects + # can be stringified properly. + pval = [str(v) for v in pval] + + cfg_key = "repo.{0}".format(prop) + if prop == "registration_uri": + # Must be stringified. + pval = str(pval) + self.set_property(section, cfg_key, pval) + + secobj = self.get_section(section) + for pname in secobj.get_index(): + if ( + pname.startswith("property.") + and pname[len("property.") :] not in pub.properties + ): + # Ensure properties not currently set + # for the publisher are removed from + # the existing configuration. + secobj.remove_property(pname) + + for key, val in pub.properties.iteritems(): + if val == DEF_TOKEN: + continue + self.set_property(section, "property.{0}".format(key), val) + + # Write configuration only if configuration directory exists; + # this is to prevent failure during the early stages of image + # creation. + if os.path.exists(os.path.dirname(self.target)): + # Discard old disabled publisher configuration if it + # exists. + da_path = os.path.join(os.path.dirname(self.target), DA_FILE) + try: + portable.remove(da_path) + except EnvironmentError as e: + # Don't care if the file is already gone. + if e.errno != errno.ENOENT: + exc = apx._convert_error(e) + if ( + not isinstance(exc, apx.PermissionsException) + or not ignore_unprivileged + ): + raise exc + + # Ensure properties with the special value of DEF_TOKEN + # are never written so that if the default value is + # changed later, clients will automatically get that + # value instead of the previous one. + default = [] + for name in list(default_properties.keys()) + list( + default_policies.keys() + ): + # The actual class method must be called here as + # ImageConfig's set_property can return the + # value that maps to 'DEFAULT' instead. + secobj = self.get_section("property") try: - ind = so.index(staying_put) - except ValueError: - raise apx.MoveRelativeToUnknown(staying_put) - if after: - so.insert(ind + 1, being_moved) + propobj = secobj.get_property(name) + except cfg.UnknownPropertyError: + # Property was removed, so skip it. + continue + + if propobj.value == DEF_TOKEN: + default.append(name) + secobj.remove_property(name) + + try: + cfg.FileConfig.write(self) + except apx.PermissionsException: + if not ignore_unprivileged: + raise + finally: + # Merge default props back into configuration. + for name in default: + self.set_property("property", name, DEF_TOKEN) + + def read_linked(self, s, sidx): + """Read linked image properties associated with a child image. + Zone linked images do not store their properties here in the + image config. 
+ + If we encounter an error while parsing property data, then + instead of throwing an error/exception which the user would + have no way of fixing, we simply return and ignore the child. + The child data will be removed from the config file the next + time it gets re-written, and if the user want the child back + they'll have to re-attach it.""" + + linked_props = dict() + + # Check for known properties + for k in [li.PROP_NAME, li.PROP_PATH, li.PROP_RECURSE]: + if k not in sidx: + # we're missing a property + return None + linked_props[k] = sidx[k] + + # all children saved in the config file are pushed based + linked_props[li.PROP_MODEL] = li.PV_MODEL_PUSH + + # make sure the name is valid + try: + lin = li.LinkedImageName(linked_props[li.PROP_NAME]) + except apx.MalformedLinkedImageName: + # invalid child image name + return None + linked_props[li.PROP_NAME] = lin + + # check if this image is already defined + if lin in self.linked_children: + # duplicate child linked image data, first copy wins + return None + + return linked_props + + def read_publisher(self, sname, sec_idx): + # sname is the section of the config file. + # publisher block has alias, prefix, origin, and mirrors + + # Add 'origin' to list of origins if it doesn't exist already. + origins = sec_idx.get("origins", []) + origin = sec_idx.get("origin", None) + if origin and origin not in origins: + origins.append(origin) + + mirrors = sec_idx.get("mirrors", []) + + # [origin|mirror]_info dictionaries map URIs to a list of dicts + # containing property values for that URI. (we allow a single + # origin to have several dictionaries with different properties) + # As we go, we crosscheck against the lists of known + # origins/mirrors. + # + # For now, the only property tracked on a per-origin/mirror + # basis is which proxy (if any) is used to access it. In the + # future, we may wish to store additional information per-URI, + # eg. + # {"proxy": "", "ssl_key": "", "ssl_cert": ""} + # + # Storing multiple dictionaries, means we could store several + # proxies or key/value pairs per URI if necessary. This list + # of dictionaries is intentionally flat, so that the underlying + # configuration file is easily human-readable. + # + origin_info = {} + mirror_info = {} + for repouri_info, prop, orig in [ + (origin_info, "origin_info", origins), + (mirror_info, "mirror_info", mirrors), + ]: + # get the list of dictionaries from the cfg + plist = sec_idx.get(prop, []) + if not plist: + continue + for uri_info in plist: + uri = uri_info["uri"] + proxy = uri_info.get("proxy") + disabled = uri_info.get("disabled", False) + # Convert a "" proxy value to None + if proxy == "": + proxy = None + + # if the uri isn't in either 'origins' or + # 'mirrors', then we've likely deleted it + # using an older pkg(7) client format. We + # must ignore this entry. + if uri not in orig: + continue + + repouri_info.setdefault(uri, []).append( + {"uri": uri, "proxy": proxy, "disabled": disabled} + ) + + props = {} + for k, v in six.iteritems(sec_idx): + if not k.startswith("property."): + continue + prop_name = k[len("property.") :] + if v == DEF_TOKEN: + # Discard publisher properties with the + # DEF_TOKEN value; allow the publisher class to + # handle these. + self.remove_property(sname, k) + continue + props[prop_name] = v + + # Load repository data. + repo_data = {} + for key, val in six.iteritems(sec_idx): + if key.startswith("repo."): + pname = key[len("repo.") :] + repo_data[pname] = val + + # Normalize/sanitize repository data. 
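Before the normalisation step below, a compact illustration of the per-origin data read_publisher just regrouped: origin_info and mirror_info arrive from the authority_<prefix> section as a flat list of dicts and are re-keyed by URI, with an empty proxy string standing in for "no proxy". The URIs and the proxy value here are made up:

    plist = [   # as stored in the configuration file (values invented)
        {"uri": "https://pkg.example.com/", "proxy": "", "disabled": "false"},
        {"uri": "https://pkg.example.com/",
         "proxy": "http://proxy.example.com:3128", "disabled": "false"},
    ]
    origin_info = {}
    for uri_info in plist:
        proxy = uri_info.get("proxy") or None      # "" means no proxy
        origin_info.setdefault(uri_info["uri"], []).append(
            {"uri": uri_info["uri"], "proxy": proxy,
             "disabled": uri_info.get("disabled", False)})
    # origin_info now maps each origin URI to every property dict that
    # applies to it (several proxies per URI are possible).
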
+ for attr in ("collection_type", "sort_policy"): + if not repo_data[attr]: + # Assume default value for attr. + del repo_data[attr] + + if repo_data["refresh_seconds"] == "": + repo_data["refresh_seconds"] = str(REPO_REFRESH_SECONDS_DEFAULT) + + prefix = sec_idx["prefix"] + ssl_key = sec_idx["ssl_key"] + ssl_cert = sec_idx["ssl_cert"] + + r = publisher.Repository(**repo_data) + + # Set per-origin/mirror URI properties + for uri_list, info_map, repo_add_func in [ + (origins, origin_info, r.add_origin), + (mirrors, mirror_info, r.add_mirror), + ]: + for uri in uri_list: + # If we didn't gather property information for + # this origin/mirror, assume a single + # origin/mirror with no proxy. + plist = info_map.get(uri, [{}]) + proxies = [] + disabled = False + for uri_info in plist: + proxy = uri_info.get("proxy") + if uri_info.get("disabled") == "true": + disabled = True + if proxy: + if proxy == publisher.SYSREPO_PROXY: + p = publisher.ProxyURI(None, system=True) + else: + p = publisher.ProxyURI(proxy) + proxies.append(p) + + if not any( + uri.startswith(scheme + ":") + for scheme in publisher.SSL_SCHEMES + ): + repouri = publisher.RepositoryURI( + uri, proxies=proxies, disabled=disabled + ) else: - so.insert(ind, being_moved) - self.set_property("property", "publisher-search-order", so) - - def __get_publisher(self, prefix): - """Accessor method for publishers dictionary""" - return self.__publishers[prefix] - - def __set_publisher(self, prefix, pubobj): - """Accessor method to keep search order correct on insert""" - pval = self.get_property("property", "publisher-search-order") - if prefix not in pval: - self.add_property_value("property", - "publisher-search-order", prefix) - self.__publishers[prefix] = pubobj - - def __del_publisher(self, prefix): - """Accessor method for publishers""" - pval = self.get_property("property", "publisher-search-order") - if prefix in pval: - self.remove_property_value("property", - "publisher-search-order", prefix) - try: - self.remove_section("authority_{0}".format(prefix)) - except cfg.UnknownSectionError: - pass - del self.__publishers[prefix] - - def __publisher_iter(self): - return self.__publishers.__iter__() - - def __publisher_iteritems(self): - """Support iteritems on publishers""" - return six.iteritems(self.__publishers) - - def __publisher_keys(self): - """Support keys() on publishers""" - return list(self.__publishers.keys()) - - def __publisher_values(self): - """Support values() on publishers""" - return list(self.__publishers.values()) - - def get_policy(self, policy): - """Return a boolean value for the named policy. Returns - the default value for the policy if the named policy is - not defined in the image configuration. - """ - return str(self.get_policy_str(policy)).lower() in ("true", - "yes") - - def get_policy_str(self, policy): - """Return the string value for the named policy. Returns - the default value for the policy if the named policy is - not defined in the image configuration. - """ - assert policy in default_policies - - prop = self.get_property("property", policy) - - # If requested policy has a default mapping in - # default_policy_map, we substitute the correct value if it's - # still set to 'default'. - if policy in default_policy_map and \ - prop == default_policies[policy]: - return default_policy_map[policy] \ - [default_policies[policy]] - - return prop - - def get_property(self, section, name): - """Returns the value of the property object matching the given - section and name. 
Raises UnknownPropertyError if it does not - exist. - """ - rval = cfg.FileConfig.get_property(self, section, name) - if name in default_policies and rval == DEF_TOKEN: - return default_policies[name] - if name in default_properties and rval == DEF_TOKEN: - return default_properties[name] - return rval - - def reset(self, overrides=misc.EmptyDict): - """Discards current configuration state and returns the - configuration object to its initial state. - - 'overrides' is an optional dictionary of property values indexed - by section name and property name. If provided, it will be used - to override any default values initially assigned during reset. - """ - - # Set __validate to be False so that the order the properties - # are set here doesn't matter. - self.__validate = False - - # Allow parent class to populate property data first. - cfg.FileConfig.reset(self, overrides=overrides) - - # - # Now transform property data as needed and populate image - # configuration data structures. - # - - # Must load variants first, since in the case of zones, the - # variant can impact the processing of publishers. (Notably, - # how ssl cert and key paths are interpreted.) - idx = self.get_index() - self.variants.update(idx.get("variant", {})) - # Variants and facets are encoded so they can contain - # '/' characters. - for k, v in six.iteritems(idx.get("variant", {})): - # convert variant name from unicode to a string - self.variants[str(unquote(k))] = v - for k, v in six.iteritems(idx.get("facet", {})): - # convert facet name from unicode to a string - self.facets[str(unquote(k))] = v - for k, v in six.iteritems(idx.get("inherited_facet", {})): - # convert facet name from unicode to a string - self.facets._set_inherited(str(unquote(k)), v) - - # Ensure architecture and zone variants are defined. - if "variant.arch" not in self.variants: - self.variants["variant.arch"] = platform.processor() - if "variant.opensolaris.zone" not in self.variants: - self.variants["variant.opensolaris.zone"] = "global" - # Ensure imagetype variant is defined - if "variant.opensolaris.imagetype" not in self.variants: - self.variants["variant.opensolaris.imagetype"] = "full" - - # load linked image child properties - for s, v in six.iteritems(idx): - if not re.match("linked_.*", s): - continue - linked_props = self.read_linked(s, v) - if linked_props: - lin = linked_props[li.PROP_NAME] - assert lin not in self.linked_children - self.linked_children[lin] = linked_props - - # Merge disabled publisher file with configuration; the DA_FILE - # is used for compatibility with older clients. - dafile = os.path.join(os.path.dirname(self.target), DA_FILE) - if os.path.exists(dafile): - # Merge disabled publisher configuration data. - disabled_cfg = cfg.FileConfig(dafile, - definitions=self.__defs, version=self.version) - for s in disabled_cfg.get_sections(): - if s.name.startswith("authority_"): - self.add_section(s) - - # Get updated configuration index. - idx = self.get_index() - - # Sort the index so that the prefixes are added to the list - # "publisher-search-order" in alphabetic order. - for s, v in collections.OrderedDict( - sorted(six.iteritems(idx))).items(): - if re.match("authority_.*", s): - k, a = self.read_publisher(s, v) - # this will call __set_publisher and add the - # prefix to "publisher-search-order". - self.publishers[k] = a - - # Move any properties found in policy section (from older - # images) to the property section. 
- for k, v in six.iteritems(idx.get("policy", {})): - self.set_property("property", k, v) - self.remove_property("policy", k) - - # Setup defaults for properties that have no value. - if not self.get_property("property", CA_PATH): - self.set_property("property", CA_PATH, - default_properties[CA_PATH]) - - pso = self.get_property("property", "publisher-search-order") - # Ensure that all configured publishers are present in - # search order (add them in alpha order to the end). - # Also ensure that all publishers in search order that - # are not known are removed. - known_pubs = set(self.__publishers.keys()) - sorted_pubs = set(pso) - new_pubs = known_pubs - sorted_pubs - old_pubs = sorted_pubs - known_pubs - for pub in old_pubs: - pso.remove(pub) - pso.extend(sorted(new_pubs)) - self.set_property("property", "publisher-search-order", pso) - - # Load mediator data. - for entry, value in six.iteritems(idx.get("mediators", {})): - mname, mtype = entry.rsplit(".", 1) - # convert mediator name+type from unicode to a string - mname = str(mname) - mtype = str(mtype) - self.mediators.setdefault(mname, {})[mtype] = value - - # Now re-enable validation and validate the properties. - self.__validate = True - self.__validate_properties() - - # Finally, attempt to write configuration again to ensure - # changes are reflected on-disk -- but only if the version - # matches most current. - if self.version == CURRENT_VERSION: - self.write(ignore_unprivileged=True) - - def set_property(self, section, name, value): - """Sets the value of the property object matching the given - section and name. If the section or property does not already - exist, it will be added. Raises InvalidPropertyValueError if - the value is not valid for the given property.""" - - cfg.FileConfig.set_property(self, section, name, value) - - if self.__validate: - self.__validate_properties() - - def set_properties(self, properties): - """Sets the values of the property objects matching those found - in the provided dictionary. If any section or property does not - already exist, it will be added. An InvalidPropertyValueError - will be raised if the value is not valid for the given - properties. - - 'properties' should be a dictionary of dictionaries indexed by - section and then by property name. As an example: - - { - 'section': { - 'property': value - } - } - """ - - # Validation must be delayed until after all properties are set, - # in case some properties are interdependent for validation. - self.__validate = False - try: - cfg.FileConfig.set_properties(self, properties) - finally: - # Ensure validation is re-enabled even if an exception - # is raised. - self.__validate = True + repouri = publisher.RepositoryURI( + uri, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + proxies=proxies, + disabled=disabled, + ) + repo_add_func(repouri) + + pub = publisher.Publisher( + prefix, + alias=sec_idx["alias"], + client_uuid=sec_idx["uuid"], + client_uuid_time=sec_idx["last_uuid"], + disabled=sec_idx["disabled"], + repository=r, + sticky=sec_idx.get("sticky", True), + props=props, + revoked_ca_certs=sec_idx.get("revoked_ca_certs", []), + approved_ca_certs=sec_idx.get("approved_ca_certs", []), + ) + + if pub.client_uuid != sec_idx["uuid"]: + # Publisher has generated new uuid; ensure configuration + # matches. 
+ self.set_property(sname, "uuid", pub.client_uuid) + self.set_property(sname, "last_uuid", pub.client_uuid_time) + + return prefix, pub + + def __validate_properties(self): + """Check that properties are consistent with each other.""" + + try: + polval = self.get_property("property", SIGNATURE_POLICY) + except cfg.PropertyConfigError: + # If it hasn't been set yet, there's nothing to + # validate. + return + + if polval == "require-names": + signames = self.get_property("property", "signature-required-names") + if not signames: + raise apx.InvalidPropertyValue( + _( + "At least one name must be provided for " + "the signature-required-names policy." + ) + ) + + def __publisher_getdefault(self, name, value): + """Support getdefault() on properties""" + return self.__publishers.get(name, value) + + # properties so we can enforce rules + publishers = DictProperty( + __get_publisher, + __set_publisher, + __del_publisher, + __publisher_iteritems, + __publisher_keys, + __publisher_values, + __publisher_iter, + doc="A dict mapping publisher prefixes to publisher objects", + fgetdefault=__publisher_getdefault, + ) - self.__validate_properties() - def write(self, ignore_unprivileged=False): - """Write the image configuration.""" +class NullSystemPublisher(object): + """Dummy system publisher object for use when an image doesn't use a + system publisher.""" + + # property.proxied-urls is here for backwards compatibility, it is no + # longer used by pkg(7) + __supported_props = ( + "publisher-search-order", + "property.proxied-urls", + SIGNATURE_POLICY, + "signature-required-names", + ) + + def __init__(self): + self.publishers = {} + self.__props = dict([(p, []) for p in self.__supported_props]) + self.__props[SIGNATURE_POLICY] = default_policies[SIGNATURE_POLICY] + + def write(self): + return + + def get_property(self, section, name): + """Return the value of the property if the NullSystemPublisher + has any knowledge of it.""" + + if section == "property" and name in self.__supported_props: + return self.__props[name] + raise NotImplementedError() + + def set_property(self, section, name, value): + if section == "property" and name in self.__supported_props: + self.__props[name] = value + self.__validate_properties() + return + raise NotImplementedError() + + def __validate_properties(self): + """Check that properties are consistent with each other.""" + + try: + polval = self.get_property("property", SIGNATURE_POLICY) + except cfg.PropertyConfigError: + # If it hasn't been set yet, there's nothing to + # validate. + return + + if polval == "require-names": + signames = self.get_property("property", "signature-required-names") + if not signames: + raise apx.InvalidPropertyValue( + _( + "At least one name must be provided for " + "the signature-required-names policy." + ) + ) + + def set_properties(self, properties): + """Set multiple properties at one time.""" + + if list(properties.keys()) != ["property"]: + raise NotImplementedError + props = properties["property"] + if not all(k in self.__supported_props for k in props): + raise NotImplementedError() + self.__props.update(props) + self.__validate_properties() - # The variant, facet, and mediator sections must be removed so - # the private copies can be transferred to the configuration - # object. 
- try: - self.remove_section("variant") - except cfg.UnknownSectionError: - pass - for f in self.variants: - self.set_property("variant", - quote(f, ""), self.variants[f]) +class BlendedConfig(object): + """Class which handles combining the system repository configuration + with the image configuration.""" + + def __init__(self, img_cfg, pkg_counts, imgdir, transport, use_system_pub): + """The 'img_cfg' parameter is the ImageConfig object for the + image. + + The 'pkg_counts' parameter is a list of tuples which contains + the number of packages each publisher has installed. + + The 'imgdir' parameter is the directory the current image + resides in. + + The 'transport' object is the image's transport. + + The 'use_system_pub' parameter is a boolean which indicates + whether the system publisher should be used.""" + + self.img_cfg = img_cfg + self.__pkg_counts = pkg_counts + + self.__proxy_url = None + + syscfg_path = os.path.join(imgdir, "pkg5.syspub") + # load the existing system repo config + if os.path.exists(syscfg_path): + old_sysconfig = ImageConfig(syscfg_path, None) + else: + old_sysconfig = NullSystemPublisher() + + # A tuple of properties whose value should be taken from the + # system repository configuration and not the image + # configuration. + self.__system_override_properties = ( + SIGNATURE_POLICY, + "signature-required-names", + ) + + self.__write_sys_cfg = True + if use_system_pub: + # get new syspub data from sysdepot + try: + self.__proxy_url = os.environ["PKG_SYSREPO_URL"] + if not self.__proxy_url.startswith("http://"): + self.__proxy_url = "http://" + self.__proxy_url + except KeyError: try: - self.remove_section("facet") - except cfg.UnknownSectionError: - pass - # save local facets - for f in self.facets.local: - self.set_property("facet", - quote(f, ""), self.facets.local[f]) - + host = smf.get_prop( + "application/pkg/zones-proxy-client", + "config/listen_host", + ) + port = smf.get_prop( + "application/pkg/zones-proxy-client", + "config/listen_port", + ) + except smf.NonzeroExitException as e: + # If we can't get information out of + # smf, try using pkg/sysrepo. + try: + host = smf.get_prop( + "application/pkg/system-repository:default", + "config/host", + ) + host = "localhost" + port = smf.get_prop( + "application/pkg/system-repository:default", + "config/port", + ) + except smf.NonzeroExitException as e: + raise apx.UnknownSysrepoConfiguration() + self.__proxy_url = "http://{0}:{1}".format(host, port) + # We use system=True so that we don't try to retrieve + # runtime $http_proxy environment variables in + # pkg.client.publisher.TransportRepoURI.__get_runtime_proxy(..) + # See also how 'system' is handled in + # pkg.client.transport.engine.CurlTransportEngine.__setup_handle(..) + # pkg.client.transport.repo.get_syspub_info(..) + sysdepot_uri = publisher.RepositoryURI( + self.__proxy_url, system=True + ) + assert sysdepot_uri.get_host() + try: + pubs, props = transport.get_syspub_data(sysdepot_uri) + except TransportFailures: + self.sys_cfg = old_sysconfig + self.__write_sys_cfg = False + else: try: - self.remove_section("inherited_facet") - except cfg.UnknownSectionError: + try: + # Try to remove any previous + # system repository + # configuration. + portable.remove(syscfg_path) + except OSError as e: + if e.errno == errno.ENOENT: + # Check to see whether + # we'll be able to write + # the configuration + # later. 
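The comment above is the hinge of a small permissions probe: when the stale pkg5.syspub file is already gone (ENOENT), an empty file is created just to prove the directory is writable before an ImageConfig is built there, while EACCES or EROFS further down selects the in-memory NullSystemPublisher instead. A simplified, self-contained sketch of that probe (the helper name is invented):

    import errno

    def can_write_syspub(syscfg_path):   # hypothetical helper, for illustration
        try:
            with open(syscfg_path, "wb"):
                pass                     # touch the file, keep it empty
            return True
        except OSError as e:
            if e.errno in (errno.EACCES, errno.EROFS):
                return False             # fall back to NullSystemPublisher
            raise
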
+ with open(syscfg_path, "wb") as fh: + fh.close() + self.sys_cfg = ImageConfig(syscfg_path, None) + else: + raise + except OSError as e: + if e.errno in (errno.EACCES, errno.EROFS): + # A permissions error means that + # either we couldn't remove the + # existing configuration or + # create a new configuration in + # that place. In that case, use + # an in-memory only version of + # the ImageConfig. + self.sys_cfg = NullSystemPublisher() + self.__write_sys_cfg = False + else: + raise + else: + # The previous configuration was + # successfully removed, so use that + # location for the new ImageConfig. + self.sys_cfg = ImageConfig( + syscfg_path, None, sysrepo_proxy=True + ) + for p in pubs: + assert not p.disabled, ( + "System " + "publisher {0} was unexpectedly " + "marked disabled in system " + "configuration.".format(p.prefix) + ) + self.sys_cfg.publishers[p.prefix] = p + + self.sys_cfg.set_property( + "property", + "publisher-search-order", + props["publisher-search-order"], + ) + # A dictionary is used to change both of these + # properties at once since setting the + # signature-policy to require-names without + # having any require-names set will cause + # property validation to fail. + d = {} + if SIGNATURE_POLICY in props: + d.setdefault("property", {})[SIGNATURE_POLICY] = props[ + SIGNATURE_POLICY + ] + if "signature-required-names" in props: + d.setdefault("property", {})[ + "signature-required-names" + ] = props["signature-required-names"] + if d: + self.sys_cfg.set_properties(d) + else: + self.sys_cfg = NullSystemPublisher() + self.__system_override_properties = () + + ( + self.__publishers, + self.added_pubs, + self.removed_pubs, + self.modified_pubs, + ) = self.__merge_publishers( + self.img_cfg, + self.sys_cfg, + pkg_counts, + old_sysconfig, + self.__proxy_url, + ) + + @staticmethod + def __merge_publishers( + img_cfg, sys_cfg, pkg_counts, old_sysconfig, proxy_url + ): + """This function merges an old publisher configuration from the + system repository with the new publisher configuration from the + system repository. It returns a tuple containing a dictionary + mapping prefix to publisher, the publisher objects for the newly + added system publishers, and the publisher objects for the + system publishers which were removed. + + The 'img_cfg' parameter is the ImageConfig object for the + image. + + The 'sys_cfg' parameter is the ImageConfig object containing the + publisher configuration from the system repository. + + The 'pkg_counts' parameter is a list of tuples which contains + the number of packages each publisher has installed. + + The 'old_sysconfig' parameter is ImageConfig object containing + the previous publisher configuration from the system repository. + + The 'proxy_url' parameter is the url for the system repository. + """ + + pubs_with_installed_pkgs = set() + for prefix, cnt, ver_cnt in pkg_counts: + if cnt > 0: + pubs_with_installed_pkgs.add(prefix) + + # keep track of old system publishers which are becoming + # disabled image publishers (because they have packages + # installed). + disabled_pubs = set() + + # Merge in previously existing system publishers which have + # installed packages. + for prefix in old_sysconfig.get_property( + "property", "publisher-search-order" + ): + if ( + prefix in sys_cfg.publishers + or prefix in img_cfg.publishers + or prefix not in pubs_with_installed_pkgs + ): + continue + + # only report this publisher as disabled if it wasn't + # previously reported and saved as disabled. 
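The loop that follows implements the docstring above: system publishers that vanished from the system repository but still own installed packages are retained as disabled publishers with their origins stripped, while genuinely new system publishers are later reported as added. A loose, simplified rendition of that bookkeeping with invented prefixes and package counts (the real method works on publisher objects and merges the disabled ones back into the system configuration first):

    old_sys = {"omnios", "extra"}                 # previous system publishers
    new_sys = {"omnios", "niche"}                 # current system publishers
    installed = {"omnios": 812, "extra": 4}       # invented package counts

    kept_disabled = {p for p in old_sys - new_sys if installed.get(p, 0) > 0}
    added = new_sys - old_sys                     # {'niche'}
    removed = (old_sys - new_sys) | kept_disabled # {'extra'}
    # 'extra' stays in the image as a disabled publisher with no origins;
    # 'niche' is reported back as a newly added publisher.
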
+ if not old_sysconfig.publishers[prefix].disabled: + disabled_pubs |= set([prefix]) + + sys_cfg.publishers[prefix] = old_sysconfig.publishers[prefix] + sys_cfg.publishers[prefix].disabled = True + + # if a syspub publisher is no longer available then + # remove all the origin and mirror information + # associated with that publisher. + sys_cfg.publishers[prefix].repository.origins = [] + sys_cfg.publishers[prefix].repository.mirrors = [] + + # check if any system publisher have had origin changes. + modified_pubs = set() + for prefix in set(old_sysconfig.publishers) & set(sys_cfg.publishers): + pold = old_sysconfig.publishers[prefix] + pnew = sys_cfg.publishers[prefix] + if list(map(str, pold.repository.origins)) != list( + map(str, pnew.repository.origins) + ): + modified_pubs |= set([prefix]) + + if proxy_url: + # We must replace the temporary "system" proxy with the + # real URL of the system-repository. + system_proxy = publisher.ProxyURI(None, system=True) + real_system_proxy = publisher.ProxyURI(proxy_url) + for p in sys_cfg.publishers.values(): + for o in p.repository.origins: + o.system = True + try: + i = o.proxies.index(system_proxy) + o.proxies[i] = real_system_proxy + except ValueError: pass - # save inherited facets - for f in self.facets.inherited: - self.set_property("inherited_facet", - quote(f, ""), self.facets.inherited[f]) - try: - self.remove_section("mediators") - except cfg.UnknownSectionError: + for m in p.repository.mirrors: + m.system = True + try: + i = m.proxies.index(system_proxy) + m.proxies[i] = real_system_proxy + except ValueError: pass - for mname, mvalues in six.iteritems(self.mediators): - for mtype, mvalue in six.iteritems(mvalues): - # name.implementation[-(source|version)] - # name.version[-source] - pname = mname + "." + mtype - self.set_property("mediators", pname, mvalue) - - # remove all linked image child configuration - idx = self.get_index() - for s, v in six.iteritems(idx): - if not re.match("linked_.*", s): - continue - self.remove_section(s) - - # add sections for any known linked children - for lin in sorted(self.linked_children): - linked_props = self.linked_children[lin] - s = "linked_{0}".format(str(lin)) - for k in [li.PROP_NAME, li.PROP_PATH, li.PROP_RECURSE]: - self.set_property(s, k, str(linked_props[k])) - - - # Transfer current publisher information to configuration. - for prefix in self.__publishers: - pub = self.__publishers[prefix] - section = "authority_{0}".format(pub.prefix) - - for prop in ("alias", "prefix", "approved_ca_certs", - "revoked_ca_certs", "disabled", "sticky"): - self.set_property(section, prop, - getattr(pub, prop)) - - # Force removal of origin property when writing. It - # should only exist when configuration is loaded if - # the client is using an older image. - try: - self.remove_property(section, "origin") - except cfg.UnknownPropertyError: - # Already gone. - pass - - # Store SSL Cert and Key data. - repo = pub.repository - p = "" - for o in repo.origins: - if o.ssl_key: - p = str(o.ssl_key) - break - self.set_property(section, "ssl_key", p) - - p = "" - for o in repo.origins: - if o.ssl_cert: - p = str(o.ssl_cert) - break - self.set_property(section, "ssl_cert", p) - - # Store per-origin/mirror information. For now, this - # information is limited to the proxy used for each URI. - for repouri_list, prop_name in [ - (repo.origins, "origin_info"), - (repo.mirrors, "mirror_info")]: - plist = [] - for r in repouri_list: - # Convert boolean value into string for - # json. 
- r_disabled = "false" - if r.disabled: - r_disabled = "true" - if not r.proxies: - plist.append( - {"uri": r.uri, - "proxy": "", - "disabled": r_disabled}) - continue - for p in r.proxies: - # sys_cfg proxy values should - # always be '' so that - # if the system-repository port - # is changed, that gets picked - # up in the image. See - # read_publisher(..) and - # BlendedConfig.__merge_publishers - if self.write_sysrepo_proxy: - puri = publisher.SYSREPO_PROXY - elif not p: - # we do not want None - # stringified to 'None' - puri = "" - else: - puri = p.uri - plist.append( - {"uri": r.uri, - "proxy": puri, - "disabled": r_disabled}) - - self.set_property(section, prop_name, - str(plist)) - - # Store publisher UUID. - self.set_property(section, "uuid", pub.client_uuid) - self.set_property(section, "last_uuid", - pub.client_uuid_time) - - # Write repository origins and mirrors, ensuring the - # list contains unique values. We must check for - # uniqueness manually, rather than using a set() - # to properly preserve the order in which origins and - # mirrors are set. - for prop in ("origins", "mirrors"): - pval = [str(v) for v in - getattr(repo, prop)] - values = set() - unique_pval = [] - for item in pval: - if item not in values: - values.add(item) - unique_pval.append(item) - - self.set_property(section, prop, unique_pval) - - for prop in ("collection_type", - "description", "legal_uris", "name", - "refresh_seconds", "registered", "registration_uri", - "related_uris", "sort_policy"): - pval = getattr(repo, prop) - if isinstance(pval, list): - # Stringify lists of objects; this - # assumes the underlying objects - # can be stringified properly. - pval = [str(v) for v in pval] - - cfg_key = "repo.{0}".format(prop) - if prop == "registration_uri": - # Must be stringified. - pval = str(pval) - self.set_property(section, cfg_key, pval) - - secobj = self.get_section(section) - for pname in secobj.get_index(): - if pname.startswith("property.") and \ - pname[len("property."):] not in pub.properties: - # Ensure properties not currently set - # for the publisher are removed from - # the existing configuration. - secobj.remove_property(pname) - - for key, val in pub.properties.iteritems(): - if val == DEF_TOKEN: - continue - self.set_property(section, - "property.{0}".format(key), val) - - # Write configuration only if configuration directory exists; - # this is to prevent failure during the early stages of image - # creation. - if os.path.exists(os.path.dirname(self.target)): - # Discard old disabled publisher configuration if it - # exists. - da_path = os.path.join(os.path.dirname(self.target), - DA_FILE) - try: - portable.remove(da_path) - except EnvironmentError as e: - # Don't care if the file is already gone. - if e.errno != errno.ENOENT: - exc = apx._convert_error(e) - if not isinstance(exc, apx.PermissionsException) or \ - not ignore_unprivileged: - raise exc - - # Ensure properties with the special value of DEF_TOKEN - # are never written so that if the default value is - # changed later, clients will automatically get that - # value instead of the previous one. - default = [] - for name in (list(default_properties.keys()) + - list(default_policies.keys())): - # The actual class method must be called here as - # ImageConfig's set_property can return the - # value that maps to 'DEFAULT' instead. - secobj = self.get_section("property") - try: - propobj = secobj.get_property(name) - except cfg.UnknownPropertyError: - # Property was removed, so skip it. 
- continue - - if propobj.value == DEF_TOKEN: - default.append(name) - secobj.remove_property(name) - - try: - cfg.FileConfig.write(self) - except apx.PermissionsException: - if not ignore_unprivileged: - raise - finally: - # Merge default props back into configuration. - for name in default: - self.set_property("property", name, - DEF_TOKEN) - - def read_linked(self, s, sidx): - """Read linked image properties associated with a child image. - Zone linked images do not store their properties here in the - image config. - - If we encounter an error while parsing property data, then - instead of throwing an error/exception which the user would - have no way of fixing, we simply return and ignore the child. - The child data will be removed from the config file the next - time it gets re-written, and if the user want the child back - they'll have to re-attach it.""" - - linked_props = dict() - - # Check for known properties - for k in [li.PROP_NAME, li.PROP_PATH, li.PROP_RECURSE]: - if k not in sidx: - # we're missing a property - return None - linked_props[k] = sidx[k] - - # all children saved in the config file are pushed based - linked_props[li.PROP_MODEL] = li.PV_MODEL_PUSH - - # make sure the name is valid - try: - lin = li.LinkedImageName(linked_props[li.PROP_NAME]) - except apx.MalformedLinkedImageName: - # invalid child image name - return None - linked_props[li.PROP_NAME] = lin - - # check if this image is already defined - if lin in self.linked_children: - # duplicate child linked image data, first copy wins - return None - - return linked_props - - def read_publisher(self, sname, sec_idx): - # sname is the section of the config file. - # publisher block has alias, prefix, origin, and mirrors - - # Add 'origin' to list of origins if it doesn't exist already. - origins = sec_idx.get("origins", []) - origin = sec_idx.get("origin", None) - if origin and origin not in origins: - origins.append(origin) - - mirrors = sec_idx.get("mirrors", []) - - # [origin|mirror]_info dictionaries map URIs to a list of dicts - # containing property values for that URI. (we allow a single - # origin to have several dictionaries with different properties) - # As we go, we crosscheck against the lists of known - # origins/mirrors. - # - # For now, the only property tracked on a per-origin/mirror - # basis is which proxy (if any) is used to access it. In the - # future, we may wish to store additional information per-URI, - # eg. - # {"proxy": "", "ssl_key": "", "ssl_cert": ""} - # - # Storing multiple dictionaries, means we could store several - # proxies or key/value pairs per URI if necessary. This list - # of dictionaries is intentionally flat, so that the underlying - # configuration file is easily human-readable. - # - origin_info = {} - mirror_info = {} - for repouri_info, prop, orig in [ - (origin_info, "origin_info", origins), - (mirror_info, "mirror_info", mirrors)]: - # get the list of dictionaries from the cfg - plist = sec_idx.get(prop, []) - if not plist: - continue - for uri_info in plist: - uri = uri_info["uri"] - proxy = uri_info.get("proxy") - disabled = uri_info.get("disabled", False) - # Convert a "" proxy value to None - if proxy == "": - proxy = None - - # if the uri isn't in either 'origins' or - # 'mirrors', then we've likely deleted it - # using an older pkg(7) client format. We - # must ignore this entry. 
- if uri not in orig: - continue - - repouri_info.setdefault(uri, []).append( - {"uri": uri, "proxy": proxy, - "disabled": disabled}) - - props = {} - for k, v in six.iteritems(sec_idx): - if not k.startswith("property."): - continue - prop_name = k[len("property."):] - if v == DEF_TOKEN: - # Discard publisher properties with the - # DEF_TOKEN value; allow the publisher class to - # handle these. - self.remove_property(sname, k) - continue - props[prop_name] = v - - # Load repository data. - repo_data = {} - for key, val in six.iteritems(sec_idx): - if key.startswith("repo."): - pname = key[len("repo."):] - repo_data[pname] = val - - # Normalize/sanitize repository data. - for attr in ("collection_type", "sort_policy"): - if not repo_data[attr]: - # Assume default value for attr. - del repo_data[attr] - - if repo_data["refresh_seconds"] == "": - repo_data["refresh_seconds"] = \ - str(REPO_REFRESH_SECONDS_DEFAULT) - - prefix = sec_idx["prefix"] - ssl_key = sec_idx["ssl_key"] - ssl_cert = sec_idx["ssl_cert"] - - r = publisher.Repository(**repo_data) - - # Set per-origin/mirror URI properties - for (uri_list, info_map, repo_add_func) in [ - (origins, origin_info, r.add_origin), - (mirrors, mirror_info, r.add_mirror)]: - for uri in uri_list: - # If we didn't gather property information for - # this origin/mirror, assume a single - # origin/mirror with no proxy. - plist = info_map.get(uri, [{}]) - proxies = [] - disabled = False - for uri_info in plist: - proxy = uri_info.get("proxy") - if uri_info.get("disabled") == "true": - disabled = True - if proxy: - if proxy == \ - publisher.SYSREPO_PROXY: - p = publisher.ProxyURI( - None, system=True) - else: - p = publisher.ProxyURI( - proxy) - proxies.append(p) - - if not any(uri.startswith(scheme + ":") for - scheme in publisher.SSL_SCHEMES): - repouri = publisher.RepositoryURI(uri, - proxies=proxies, disabled=disabled) - else: - repouri = publisher.RepositoryURI(uri, - ssl_cert=ssl_cert, ssl_key=ssl_key, - proxies=proxies, disabled=disabled) - repo_add_func(repouri) - - pub = publisher.Publisher(prefix, alias=sec_idx["alias"], - client_uuid=sec_idx["uuid"], - client_uuid_time=sec_idx["last_uuid"], - disabled=sec_idx["disabled"], - repository=r, sticky=sec_idx.get("sticky", True), - props=props, - revoked_ca_certs=sec_idx.get("revoked_ca_certs", []), - approved_ca_certs=sec_idx.get("approved_ca_certs", [])) - - if pub.client_uuid != sec_idx["uuid"]: - # Publisher has generated new uuid; ensure configuration - # matches. - self.set_property(sname, "uuid", pub.client_uuid) - self.set_property(sname, "last_uuid", - pub.client_uuid_time) - - return prefix, pub - - def __validate_properties(self): - """Check that properties are consistent with each other.""" + p.sys_pub = True + + # Create a dictionary mapping publisher prefix to publisher + # object while merging user configured origins into system + # publishers. + res = {} + for p in sys_cfg.publishers: + res[p] = sys_cfg.publishers[p] + for p in img_cfg.publishers.values(): + assert isinstance(p, publisher.Publisher) + if p.prefix in res: + repo = p.repository + srepo = res[p.prefix].repository + # We do not allow duplicate URIs for either + # origins or mirrors, so must check whether the + # system publisher already provides + # a path to each user-configured origin/mirror. + # If so, we do not add the user-configured + # origin or mirror. 
+ for o in repo.origins: + if not srepo.has_origin(o): + srepo.add_origin(o) + for m in repo.mirrors: + if not srepo.has_mirror(m): + srepo.add_mirror(m) + else: + res[p.prefix] = p + + new_pubs = set(sys_cfg.publishers.keys()) + old_pubs = set(old_sysconfig.publishers.keys()) + + # Find the system publishers which appeared or vanished. This + # is needed so that the catalog information can be rebuilt. + added_pubs = new_pubs - old_pubs + removed_pubs = old_pubs - new_pubs + + added_pubs = [res[p] for p in added_pubs] + removed_pubs = [ + old_sysconfig.publishers[p] for p in removed_pubs | disabled_pubs + ] + modified_pubs = [old_sysconfig.publishers[p] for p in modified_pubs] + return (res, added_pubs, removed_pubs, modified_pubs) + + def write_sys_cfg(self): + # Write out the new system publisher configuration. + if self.__write_sys_cfg: + self.sys_cfg.write() + + def write(self): + """Update the image configuration to reflect any changes made, + then write it.""" + + for p in self.__publishers.values(): + if not p.sys_pub: + self.img_cfg.publishers[p.prefix] = p + continue + + # If we had previous user-configuration for this + # publisher, only store non-system publisher changes + repo = p.repository + sticky = p.sticky + user_origins = [o for o in repo.origins if not o.system] + system_origins = [o for o in repo.origins if o.system] + user_mirrors = [o for o in repo.mirrors if not o.system] + system_mirrors = [o for o in repo.mirrors if o.system] + old_origins = [] + old_mirrors = [] + + # look for any previously set configuration + if p.prefix in self.img_cfg.publishers: + old_pub = self.img_cfg.publishers[p.prefix] + old_origins = old_pub.repository.origins + old_mirrors = old_pub.repository.mirrors + sticky = old_pub.sticky + + # Preserve any origins configured in the image, + # but masked by the same origin also provided + # by the system-repository. + user_origins = user_origins + [ + o for o in old_origins if o in system_origins + ] + user_mirrors = user_mirrors + [ + o for o in old_mirrors if o in system_mirrors + ] - try: - polval = self.get_property("property", SIGNATURE_POLICY) - except cfg.PropertyConfigError: - # If it hasn't been set yet, there's nothing to - # validate. - return - - if polval == "require-names": - signames = self.get_property("property", - "signature-required-names") - if not signames: - raise apx.InvalidPropertyValue(_( - "At least one name must be provided for " - "the signature-required-names policy.")) - - def __publisher_getdefault(self, name, value): - """Support getdefault() on properties""" - return self.__publishers.get(name, value) - - # properties so we can enforce rules - publishers = DictProperty(__get_publisher, __set_publisher, - __del_publisher, __publisher_iteritems, __publisher_keys, - __publisher_values, __publisher_iter, - doc="A dict mapping publisher prefixes to publisher objects", - fgetdefault=__publisher_getdefault, ) + # no user changes, so nothing new to write + if set(user_origins) == set(old_origins) and set( + user_mirrors + ) == set(old_mirrors): + continue + # store a publisher with this configuration + user_pub = publisher.Publisher(prefix=p.prefix, sticky=sticky) + user_pub.repository = publisher.Repository() + user_pub.repository.origins = user_origins + user_pub.repository.mirrors = user_mirrors + self.img_cfg.publishers[p.prefix] = user_pub -class NullSystemPublisher(object): - """Dummy system publisher object for use when an image doesn't use a - system publisher.""" + # Write out the image configuration. 
+ self.img_cfg.write() - # property.proxied-urls is here for backwards compatibility, it is no - # longer used by pkg(7) - __supported_props = ("publisher-search-order", "property.proxied-urls", - SIGNATURE_POLICY, "signature-required-names") + def allowed_to_move(self, pub): + """Return whether a publisher is allowed to move in the search + order.""" - def __init__(self): - self.publishers = {} - self.__props = dict([(p, []) for p in self.__supported_props]) - self.__props[SIGNATURE_POLICY] = \ - default_policies[SIGNATURE_POLICY] + return not self.__is_sys_pub(pub) - def write(self): - return + def add_property_value(self, *args, **kwargs): + return self.img_cfg.add_property_value(*args, **kwargs) - def get_property(self, section, name): - """Return the value of the property if the NullSystemPublisher - has any knowledge of it.""" + def remove_property_value(self, *args, **kwargs): + return self.img_cfg.remove_property_value(*args, **kwargs) - if section == "property" and \ - name in self.__supported_props: - return self.__props[name] - raise NotImplementedError() + def get_index(self): + return self.img_cfg.get_index() - def set_property(self, section, name, value): - if section == "property" and name in self.__supported_props: - self.__props[name] = value - self.__validate_properties() - return - raise NotImplementedError() + def get_policy(self, *args, **kwargs): + return self.img_cfg.get_policy(*args, **kwargs) - def __validate_properties(self): - """Check that properties are consistent with each other.""" + def get_policy_str(self, *args, **kwargs): + return self.img_cfg.get_policy_str(*args, **kwargs) - try: - polval = self.get_property("property", SIGNATURE_POLICY) - except cfg.PropertyConfigError: - # If it hasn't been set yet, there's nothing to - # validate. - return - - if polval == "require-names": - signames = self.get_property("property", - "signature-required-names") - if not signames: - raise apx.InvalidPropertyValue(_( - "At least one name must be provided for " - "the signature-required-names policy.")) - - def set_properties(self, properties): - """Set multiple properties at one time.""" - - if list(properties.keys()) != ["property"]: - raise NotImplementedError - props = properties["property"] - if not all(k in self.__supported_props for k in props): - raise NotImplementedError() - self.__props.update(props) - self.__validate_properties() + def get_property(self, section, name): + # If the property being retrieved is the publisher search order, + # it's necessary to merge the information from the image + # configuration and the system configuration. 
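The search-order merge that get_property() performs can be shown in isolation. A small, self-contained sketch with hypothetical publisher names; the actual implementation follows in the hunk below:

# Enabled system publishers come first, then image-only publishers,
# then disabled system publishers the image does not list itself.
sys_order = ["solaris", "extra"]     # system publisher search order
disabled = {"extra"}                 # disabled system publishers
img_order = ["mypub", "solaris"]     # image-configured search order

enabled_sys = [p for p in sys_order if p not in disabled]
img_only = [p for p in img_order if p not in enabled_sys]
disabled_sys = [p for p in sys_order if p in disabled and p not in img_only]
print(enabled_sys + img_only + disabled_sys)   # ['solaris', 'mypub', 'extra']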
+ if section == "property" and name == "publisher-search-order": + res = self.sys_cfg.get_property(section, name) + enabled_sys_pubs = [ + p for p in res if not self.sys_cfg.publishers[p].disabled + ] + img_pubs = [ + s + for s in self.img_cfg.get_property(section, name) + if s not in enabled_sys_pubs + ] + disabled_sys_pubs = [ + p + for p in res + if self.sys_cfg.publishers[p].disabled and p not in img_pubs + ] + return enabled_sys_pubs + img_pubs + disabled_sys_pubs + if section == "property" and name in self.__system_override_properties: + return self.sys_cfg.get_property(section, name) + return self.img_cfg.get_property(section, name) + def remove_property(self, *args, **kwargs): + return self.img_cfg.remove_property(*args, **kwargs) -class BlendedConfig(object): - """Class which handles combining the system repository configuration - with the image configuration.""" + def set_property(self, *args, **kwargs): + return self.img_cfg.set_property(*args, **kwargs) - def __init__(self, img_cfg, pkg_counts, imgdir, transport, - use_system_pub): - """The 'img_cfg' parameter is the ImageConfig object for the - image. + def set_properties(self, *args, **kwargs): + return self.img_cfg.set_properties(*args, **kwargs) - The 'pkg_counts' parameter is a list of tuples which contains - the number of packages each publisher has installed. + @property + def target(self): + return self.img_cfg.target - The 'imgdir' parameter is the directory the current image - resides in. + @property + def variants(self): + return self.img_cfg.variants - The 'transport' object is the image's transport. + def __get_mediators(self): + return self.img_cfg.mediators - The 'use_system_pub' parameter is a boolean which indicates - whether the system publisher should be used.""" + def __set_mediators(self, mediators): + self.img_cfg.mediators = mediators - self.img_cfg = img_cfg - self.__pkg_counts = pkg_counts + mediators = property(__get_mediators, __set_mediators) - self.__proxy_url = None + def __get_facets(self): + return self.img_cfg.facets - syscfg_path = os.path.join(imgdir, "pkg5.syspub") - # load the existing system repo config - if os.path.exists(syscfg_path): - old_sysconfig = ImageConfig(syscfg_path, None) - else: - old_sysconfig = NullSystemPublisher() - - # A tuple of properties whose value should be taken from the - # system repository configuration and not the image - # configuration. - self.__system_override_properties = (SIGNATURE_POLICY, - "signature-required-names") - - self.__write_sys_cfg = True - if use_system_pub: - # get new syspub data from sysdepot - try: - self.__proxy_url = os.environ["PKG_SYSREPO_URL"] - if not self.__proxy_url.startswith("http://"): - self.__proxy_url = "http://" + \ - self.__proxy_url - except KeyError: - try: - host = smf.get_prop( - "application/pkg/zones-proxy-client", - "config/listen_host") - port = smf.get_prop( - "application/pkg/zones-proxy-client", - "config/listen_port") - except smf.NonzeroExitException as e: - # If we can't get information out of - # smf, try using pkg/sysrepo. 
- try: - host = smf.get_prop( - "application/pkg/system-repository:default", - "config/host") - host = "localhost" - port = smf.get_prop( - "application/pkg/system-repository:default", - "config/port") - except smf.NonzeroExitException as e: - raise apx.UnknownSysrepoConfiguration() - self.__proxy_url = "http://{0}:{1}".format(host, port) - # We use system=True so that we don't try to retrieve - # runtime $http_proxy environment variables in - # pkg.client.publisher.TransportRepoURI.__get_runtime_proxy(..) - # See also how 'system' is handled in - # pkg.client.transport.engine.CurlTransportEngine.__setup_handle(..) - # pkg.client.transport.repo.get_syspub_info(..) - sysdepot_uri = publisher.RepositoryURI(self.__proxy_url, - system=True) - assert sysdepot_uri.get_host() - try: - pubs, props = transport.get_syspub_data( - sysdepot_uri) - except TransportFailures: - self.sys_cfg = old_sysconfig - self.__write_sys_cfg = False - else: - try: - try: - # Try to remove any previous - # system repository - # configuration. - portable.remove(syscfg_path) - except OSError as e: - if e.errno == errno.ENOENT: - # Check to see whether - # we'll be able to write - # the configuration - # later. - with open(syscfg_path, - "wb") as fh: - fh.close() - self.sys_cfg = \ - ImageConfig( - syscfg_path, None) - else: - raise - except OSError as e: - if e.errno in \ - (errno.EACCES, errno.EROFS): - # A permissions error means that - # either we couldn't remove the - # existing configuration or - # create a new configuration in - # that place. In that case, use - # an in-memory only version of - # the ImageConfig. - self.sys_cfg = \ - NullSystemPublisher() - self.__write_sys_cfg = False - else: - raise - else: - # The previous configuration was - # successfully removed, so use that - # location for the new ImageConfig. - self.sys_cfg = \ - ImageConfig(syscfg_path, None, - sysrepo_proxy=True) - for p in pubs: - assert not p.disabled, "System " \ - "publisher {0} was unexpectedly " \ - "marked disabled in system " \ - "configuration.".format(p.prefix) - self.sys_cfg.publishers[p.prefix] = p - - self.sys_cfg.set_property("property", - "publisher-search-order", - props["publisher-search-order"]) - # A dictionary is used to change both of these - # properties at once since setting the - # signature-policy to require-names without - # having any require-names set will cause - # property validation to fail. - d = {} - if SIGNATURE_POLICY in props: - d.setdefault("property", {})[ - SIGNATURE_POLICY] = props[ - SIGNATURE_POLICY] - if "signature-required-names" in props: - d.setdefault("property", {})[ - "signature-required-names"] = props[ - "signature-required-names"] - if d: - self.sys_cfg.set_properties(d) - else: - self.sys_cfg = NullSystemPublisher() - self.__system_override_properties = () - - self.__publishers, self.added_pubs, self.removed_pubs, \ - self.modified_pubs = \ - self.__merge_publishers(self.img_cfg, self.sys_cfg, - pkg_counts, old_sysconfig, self.__proxy_url) - - @staticmethod - def __merge_publishers(img_cfg, sys_cfg, pkg_counts, old_sysconfig, - proxy_url): - """This function merges an old publisher configuration from the - system repository with the new publisher configuration from the - system repository. It returns a tuple containing a dictionary - mapping prefix to publisher, the publisher objects for the newly - added system publishers, and the publisher objects for the - system publishers which were removed. - - The 'img_cfg' parameter is the ImageConfig object for the - image. 
- - The 'sys_cfg' parameter is the ImageConfig object containing the - publisher configuration from the system repository. - - The 'pkg_counts' parameter is a list of tuples which contains - the number of packages each publisher has installed. - - The 'old_sysconfig' parameter is ImageConfig object containing - the previous publisher configuration from the system repository. - - The 'proxy_url' parameter is the url for the system repository. - """ - - pubs_with_installed_pkgs = set() - for prefix, cnt, ver_cnt in pkg_counts: - if cnt > 0: - pubs_with_installed_pkgs.add(prefix) - - # keep track of old system publishers which are becoming - # disabled image publishers (because they have packages - # installed). - disabled_pubs = set() - - # Merge in previously existing system publishers which have - # installed packages. - for prefix in old_sysconfig.get_property("property", - "publisher-search-order"): - if prefix in sys_cfg.publishers or \ - prefix in img_cfg.publishers or \ - prefix not in pubs_with_installed_pkgs: - continue - - # only report this publisher as disabled if it wasn't - # previously reported and saved as disabled. - if not old_sysconfig.publishers[prefix].disabled: - disabled_pubs |= set([prefix]) - - sys_cfg.publishers[prefix] = \ - old_sysconfig.publishers[prefix] - sys_cfg.publishers[prefix].disabled = True - - # if a syspub publisher is no longer available then - # remove all the origin and mirror information - # associated with that publisher. - sys_cfg.publishers[prefix].repository.origins = [] - sys_cfg.publishers[prefix].repository.mirrors = [] - - # check if any system publisher have had origin changes. - modified_pubs = set() - for prefix in set(old_sysconfig.publishers) & \ - set(sys_cfg.publishers): - pold = old_sysconfig.publishers[prefix] - pnew = sys_cfg.publishers[prefix] - if list(map(str, pold.repository.origins)) != \ - list(map(str, pnew.repository.origins)): - modified_pubs |= set([prefix]) - - if proxy_url: - # We must replace the temporary "system" proxy with the - # real URL of the system-repository. - system_proxy = publisher.ProxyURI(None, system=True) - real_system_proxy = publisher.ProxyURI(proxy_url) - for p in sys_cfg.publishers.values(): - for o in p.repository.origins: - o.system = True - try: - i = o.proxies.index(system_proxy) - o.proxies[i] = real_system_proxy - except ValueError: - pass - - for m in p.repository.mirrors: - m.system = True - try: - i = m.proxies.index(system_proxy) - m.proxies[i] = real_system_proxy - except ValueError: - pass - p.sys_pub = True - - # Create a dictionary mapping publisher prefix to publisher - # object while merging user configured origins into system - # publishers. - res = {} - for p in sys_cfg.publishers: - res[p] = sys_cfg.publishers[p] - for p in img_cfg.publishers.values(): - assert isinstance(p, publisher.Publisher) - if p.prefix in res: - repo = p.repository - srepo = res[p.prefix].repository - # We do not allow duplicate URIs for either - # origins or mirrors, so must check whether the - # system publisher already provides - # a path to each user-configured origin/mirror. - # If so, we do not add the user-configured - # origin or mirror. 
- for o in repo.origins: - if not srepo.has_origin(o): - srepo.add_origin(o) - for m in repo.mirrors: - if not srepo.has_mirror(m): - srepo.add_mirror(m) - else: - res[p.prefix] = p - - new_pubs = set(sys_cfg.publishers.keys()) - old_pubs = set(old_sysconfig.publishers.keys()) + def __set_facets(self, facets): + self.img_cfg.facets = facets - # Find the system publishers which appeared or vanished. This - # is needed so that the catalog information can be rebuilt. - added_pubs = new_pubs - old_pubs - removed_pubs = old_pubs - new_pubs + facets = property(__get_facets, __set_facets) - added_pubs = [res[p] for p in added_pubs] - removed_pubs = [ - old_sysconfig.publishers[p] - for p in removed_pubs | disabled_pubs - ] - modified_pubs = [ - old_sysconfig.publishers[p] - for p in modified_pubs - ] - return (res, added_pubs, removed_pubs, modified_pubs) - - def write_sys_cfg(self): - # Write out the new system publisher configuration. - if self.__write_sys_cfg: - self.sys_cfg.write() - - def write(self): - """Update the image configuration to reflect any changes made, - then write it.""" - - for p in self.__publishers.values(): - - if not p.sys_pub: - self.img_cfg.publishers[p.prefix] = p - continue - - # If we had previous user-configuration for this - # publisher, only store non-system publisher changes - repo = p.repository - sticky = p.sticky - user_origins = [o for o in repo.origins if not o.system] - system_origins = [o for o in repo.origins if o.system] - user_mirrors = [o for o in repo.mirrors if not o.system] - system_mirrors = [o for o in repo.mirrors if o.system] - old_origins = [] - old_mirrors = [] - - # look for any previously set configuration - if p.prefix in self.img_cfg.publishers: - old_pub = self.img_cfg.publishers[p.prefix] - old_origins = old_pub.repository.origins - old_mirrors = old_pub.repository.mirrors - sticky = old_pub.sticky - - # Preserve any origins configured in the image, - # but masked by the same origin also provided - # by the system-repository. - user_origins = user_origins + \ - [o for o in old_origins - if o in system_origins] - user_mirrors = user_mirrors + \ - [o for o in old_mirrors - if o in system_mirrors] - - # no user changes, so nothing new to write - if set(user_origins) == set(old_origins) and \ - set(user_mirrors) == set(old_mirrors): - continue - - # store a publisher with this configuration - user_pub = publisher.Publisher(prefix=p.prefix, - sticky=sticky) - user_pub.repository = publisher.Repository() - user_pub.repository.origins = user_origins - user_pub.repository.mirrors = user_mirrors - self.img_cfg.publishers[p.prefix] = user_pub - - # Write out the image configuration. - self.img_cfg.write() - - def allowed_to_move(self, pub): - """Return whether a publisher is allowed to move in the search - order.""" - - return not self.__is_sys_pub(pub) - - def add_property_value(self, *args, **kwargs): - return self.img_cfg.add_property_value(*args, **kwargs) - - def remove_property_value(self, *args, **kwargs): - return self.img_cfg.remove_property_value(*args, **kwargs) - - def get_index(self): - return self.img_cfg.get_index() - - def get_policy(self, *args, **kwargs): - return self.img_cfg.get_policy(*args, **kwargs) - - def get_policy_str(self, *args, **kwargs): - return self.img_cfg.get_policy_str(*args, **kwargs) - - def get_property(self, section, name): - # If the property being retrieved is the publisher search order, - # it's necessary to merge the information from the image - # configuration and the system configuration. 
- if section == "property" and name == "publisher-search-order": - res = self.sys_cfg.get_property(section, name) - enabled_sys_pubs = [ - p for p in res - if not self.sys_cfg.publishers[p].disabled - ] - img_pubs = [ - s for s in self.img_cfg.get_property(section, name) - if s not in enabled_sys_pubs - ] - disabled_sys_pubs = [ - p for p in res - if self.sys_cfg.publishers[p].disabled and \ - p not in img_pubs - ] - return enabled_sys_pubs + img_pubs + disabled_sys_pubs - if section == "property" and name in \ - self.__system_override_properties: - return self.sys_cfg.get_property(section, name) - return self.img_cfg.get_property(section, name) - - def remove_property(self, *args, **kwargs): - return self.img_cfg.remove_property(*args, **kwargs) - - def set_property(self, *args, **kwargs): - return self.img_cfg.set_property(*args, **kwargs) - - def set_properties(self, *args, **kwargs): - return self.img_cfg.set_properties(*args, **kwargs) - - @property - def target(self): - return self.img_cfg.target - - @property - def variants(self): - return self.img_cfg.variants - - def __get_mediators(self): - return self.img_cfg.mediators - - def __set_mediators(self, mediators): - self.img_cfg.mediators = mediators - - mediators = property(__get_mediators, __set_mediators) - - def __get_facets(self): - return self.img_cfg.facets - - def __set_facets(self, facets): - self.img_cfg.facets = facets - - facets = property(__get_facets, __set_facets) - - def __get_linked_children(self): - return self.img_cfg.linked_children + def __get_linked_children(self): + return self.img_cfg.linked_children - def __set_linked_children(self, linked_children): - self.img_cfg.linked_children = linked_children + def __set_linked_children(self, linked_children): + self.img_cfg.linked_children = linked_children - linked_children = property(__get_linked_children, - __set_linked_children) + linked_children = property(__get_linked_children, __set_linked_children) - def __is_sys_pub(self, prefix): - """Return whether the publisher with the prefix 'prefix' is a - system publisher.""" - - return prefix in self.sys_cfg.publishers + def __is_sys_pub(self, prefix): + """Return whether the publisher with the prefix 'prefix' is a + system publisher.""" - def remove_publisher(self, prefix): - try: - del self.publishers[prefix] - except KeyError: - pass + return prefix in self.sys_cfg.publishers + + def remove_publisher(self, prefix): + try: + del self.publishers[prefix] + except KeyError: + pass + + def change_publisher_search_order(self, being_moved, staying_put, after): + """Change the publisher search order by moving the publisher + 'being_moved' relative to the publisher 'staying put.' The + boolean 'after' determines whether 'being_moved' is placed before + or after 'staying_put'.""" + + if being_moved == staying_put: + raise apx.MoveRelativeToSelf() + + if self.__is_sys_pub(being_moved): + raise apx.ModifyingSyspubException( + _( + "Publisher '{0}' " + "is a system publisher and cannot be moved." + ).format(being_moved) + ) + if self.__is_sys_pub(staying_put): + raise apx.ModifyingSyspubException( + _( + "Publisher '{0}' " + "is a system publisher and other publishers cannot " + "be moved relative to it." + ).format(staying_put) + ) + self.img_cfg.change_publisher_search_order( + being_moved, staying_put, after + ) + + def reset(self, overrides=misc.EmptyDict): + """Discards current configuration state and returns the + configuration object to its initial state. 
+ + 'overrides' is an optional dictionary of property values indexed + by section name and property name. If provided, it will be used + to override any default values initially assigned during reset. + """ + + self.img_cfg.reset(overrides) + self.sys_cfg.reset() + old_sysconfig = ImageConfig(os.path.join(imgdir, "pkg5.syspub"), None) + ( + self.__publishers, + self.added_pubs, + self.removed_pubs, + self.modified_pubs, + ) = self.__merge_publishers( + self.img_cfg, self.sys_cfg, self.__pkg_counts, old_sysconfig + ) + + def __get_publisher(self, prefix): + """Accessor method for publishers dictionary""" + return self.__publishers[prefix] + + def __set_publisher(self, prefix, pubobj): + """Accessor method to keep search order correct on insert""" + pval = self.get_property("property", "publisher-search-order") + if prefix not in pval: + self.add_property_value( + "property", "publisher-search-order", prefix + ) + self.__publishers[prefix] = pubobj + + def __del_publisher(self, prefix): + """Accessor method for publishers""" + if self.__is_sys_pub(prefix): + raise apx.ModifyingSyspubException( + _("{0} is a system " "publisher and cannot be unset.").format( + prefix + ) + ) + + del self.img_cfg.publishers[prefix] + del self.__publishers[prefix] + + def __publisher_iter(self): + return self.__publishers.__iter__() + + def __publisher_iteritems(self): + """Support iteritems on publishers""" + return six.iteritems(self.__publishers) + + def __publisher_keys(self): + """Support keys() on publishers""" + return list(self.__publishers.keys()) + + def __publisher_values(self): + """Support values() on publishers""" + return list(self.__publishers.values()) + + # properties so we can enforce rules and manage two potentially + # overlapping sets of publishers + publishers = DictProperty( + __get_publisher, + __set_publisher, + __del_publisher, + __publisher_iteritems, + __publisher_keys, + __publisher_values, + __publisher_iter, + doc="A dict mapping publisher prefixes to publisher objects", + ) - def change_publisher_search_order(self, being_moved, staying_put, - after): - """Change the publisher search order by moving the publisher - 'being_moved' relative to the publisher 'staying put.' The - boolean 'after' determines whether 'being_moved' is placed before - or after 'staying_put'.""" - - if being_moved == staying_put: - raise apx.MoveRelativeToSelf() - - if self.__is_sys_pub(being_moved): - raise apx.ModifyingSyspubException(_("Publisher '{0}' " - "is a system publisher and cannot be moved.").format( - being_moved)) - if self.__is_sys_pub(staying_put): - raise apx.ModifyingSyspubException(_("Publisher '{0}' " - "is a system publisher and other publishers cannot " - "be moved relative to it.").format(staying_put)) - self.img_cfg.change_publisher_search_order(being_moved, - staying_put, after) - - def reset(self, overrides=misc.EmptyDict): - """Discards current configuration state and returns the - configuration object to its initial state. - - 'overrides' is an optional dictionary of property values indexed - by section name and property name. If provided, it will be used - to override any default values initially assigned during reset. 
- """ - - self.img_cfg.reset(overrides) - self.sys_cfg.reset() - old_sysconfig = ImageConfig(os.path.join(imgdir, "pkg5.syspub"), - None) - self.__publishers, self.added_pubs, self.removed_pubs, \ - self.modified_pubs = \ - self.__merge_publishers(self.img_cfg, - self.sys_cfg, self.__pkg_counts, old_sysconfig) - - def __get_publisher(self, prefix): - """Accessor method for publishers dictionary""" - return self.__publishers[prefix] - - def __set_publisher(self, prefix, pubobj): - """Accessor method to keep search order correct on insert""" - pval = self.get_property("property", "publisher-search-order") - if prefix not in pval: - self.add_property_value("property", - "publisher-search-order", prefix) - self.__publishers[prefix] = pubobj - - def __del_publisher(self, prefix): - """Accessor method for publishers""" - if self.__is_sys_pub(prefix): - raise apx.ModifyingSyspubException(_("{0} is a system " - "publisher and cannot be unset.").format(prefix)) - - del self.img_cfg.publishers[prefix] - del self.__publishers[prefix] - - def __publisher_iter(self): - return self.__publishers.__iter__() - - def __publisher_iteritems(self): - """Support iteritems on publishers""" - return six.iteritems(self.__publishers) - - def __publisher_keys(self): - """Support keys() on publishers""" - return list(self.__publishers.keys()) - - def __publisher_values(self): - """Support values() on publishers""" - return list(self.__publishers.values()) - - # properties so we can enforce rules and manage two potentially - # overlapping sets of publishers - publishers = DictProperty(__get_publisher, __set_publisher, - __del_publisher, __publisher_iteritems, __publisher_keys, - __publisher_values, __publisher_iter, - doc="A dict mapping publisher prefixes to publisher objects") # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/imageplan.py b/src/modules/client/imageplan.py index 4e4ba77c4..3ac01adc2 100644 --- a/src/modules/client/imageplan.py +++ b/src/modules/client/imageplan.py @@ -48,6 +48,7 @@ from functools import cmp_to_key, reduce from pkg.client import global_settings + logger = global_settings.logger import pkg.actions @@ -74,6488 +75,6886 @@ from pkg.client.debugvalues import DebugValues from pkg.client.plandesc import _ActionPlan from pkg.mediator import mediator_impl_matches -from pkg.client.pkgdefs import (PKG_OP_DEHYDRATE, PKG_OP_REHYDRATE, MSG_ERROR, - MSG_WARNING, MSG_INFO, MSG_GENERAL, MSG_UNPACKAGED, PKG_OP_VERIFY) +from pkg.client.pkgdefs import ( + PKG_OP_DEHYDRATE, + PKG_OP_REHYDRATE, + MSG_ERROR, + MSG_WARNING, + MSG_INFO, + MSG_GENERAL, + MSG_UNPACKAGED, + PKG_OP_VERIFY, +) def _reorder_hardlinks(hardlinks): - """Re-order the list of hardlinks to handle hard links whose - target is another hard link.""" + """Re-order the list of hardlinks to handle hard links whose + target is another hard link.""" - reordered = [] + reordered = [] - # Capture the paths for all given hardlinks. - paths = [hardlink.dst.attrs["path"] for hardlink in hardlinks] + # Capture the paths for all given hardlinks. + paths = [hardlink.dst.attrs["path"] for hardlink in hardlinks] - def add_targets(path, hardlinks, reordered): - """Find those hardlinks whose target is path.""" + def add_targets(path, hardlinks, reordered): + """Find those hardlinks whose target is path.""" - srcs = [l for l in hardlinks - if l.dst.get_target_path() == path] - for hardlink in srcs: - hardlinks.remove(hardlink) - reordered.append(hardlink) - # ... 
process hardlinks whose dst is _this_ hardlink. - add_targets(hardlink.dst.attrs["path"], hardlinks, - reordered) + srcs = [l for l in hardlinks if l.dst.get_target_path() == path] + for hardlink in srcs: + hardlinks.remove(hardlink) + reordered.append(hardlink) + # ... process hardlinks whose dst is _this_ hardlink. + add_targets(hardlink.dst.attrs["path"], hardlinks, reordered) - # Find hardlinks whose dst is _not_ another hardlink. - unchained = [l for l in hardlinks - if l.dst.get_target_path() not in paths] - for hardlink in unchained: - hardlinks.remove(hardlink) - reordered.append(hardlink) - # ... process hardlinks whose target is _this_ hardlink. - add_targets(hardlink.dst.attrs["path"], hardlinks, - reordered) + # Find hardlinks whose dst is _not_ another hardlink. + unchained = [l for l in hardlinks if l.dst.get_target_path() not in paths] + for hardlink in unchained: + hardlinks.remove(hardlink) + reordered.append(hardlink) + # ... process hardlinks whose target is _this_ hardlink. + add_targets(hardlink.dst.attrs["path"], hardlinks, reordered) - # Append remaining hardlinks (likely circular or otherwise broken). - reordered.extend(hardlinks) + # Append remaining hardlinks (likely circular or otherwise broken). + reordered.extend(hardlinks) - return reordered + return reordered class ImagePlan(object): - """ImagePlan object contains the plan for changing the image... - there are separate routines for planning the various types of - image modifying operations; evaluation (comparing manifests - and building lists of removal, install and update actions - and their execution is all common code""" - - MATCH_ALL = 0 - MATCH_INST_VERSIONS = 1 - MATCH_INST_STEMS = 2 - MATCH_UNINSTALLED = 3 - - def __init__(self, image, op, progtrack, check_cancel, noexecute=False, - pd=None): - - self.image = image - self.__progtrack = progtrack - self.__check_cancel = check_cancel - self.__noexecute = noexecute - - # The set of processed target object directories known to be - # valid (those that are not symlinks and thus are valid for - # use during installation). This is used by the pkg.actions - # classes during install() operations. - self.valid_directories = set() - - # A place to keep info about saved_files; needed by file action. 
- self.saved_files = {} - - self.__target_install_count = 0 - self.__target_update_count = 0 - self.__target_removal_count = 0 - - self.__directories = None # implement ref counting - self.__symlinks = None # for dirs and links and - self.__hardlinks = None # hardlinks - self.__exclude_re = None - self.__licenses = None - self.__legacy = None - self.__cached_actions = {} - self.__fixups = {} - self.operations_pubs = None # pubs being operated in hydrate - - self.invalid_meds = defaultdict(set) # targetless mediated links - self.__old_excludes = image.list_excludes() - self.__new_excludes = self.__old_excludes - - self.__preexecuted_indexing_error = None - self.__match_inst = {} # dict of fmri -> pattern - self.__match_rm = {} # dict of fmri -> pattern - self.__match_update = {} # dict of fmri -> pattern - - self.__pkg_actuators = set() - self._retrieved = set() - - self.pd = None - if pd is None: - pd = plandesc.PlanDescription(op) - assert(pd._op == op) - self.__setup_plan(pd) - - def __str__(self): - - if self.pd.state == plandesc.UNEVALUATED: - s = "UNEVALUATED:\n" - return s - - s = "{0}\n".format(self.pd._solver_summary) - - if self.pd.state < plandesc.EVALUATED_PKGS: - return s - - s += "Package version changes:\n" - - for oldfmri, newfmri in self.pd._fmri_changes: - s += "{0} -> {1}\n".format(oldfmri, newfmri) - - if self.pd._actuators: - s = s + "\nActuators:\n{0}\n".format(self.pd._actuators) - - if self.__old_excludes != self.__new_excludes: - s += "\nVariants/Facet changes:\n {0}".format( - "\n".join(self.pd.get_varcets())) - - if self.pd._mediators_change: - s = s + "\nMediator changes:\n {0}".format( - "\n".join(self.pd.get_mediators())) - - return s - - def __setup_plan(self, plan): - assert plan.state in [ - plandesc.UNEVALUATED, plandesc.EVALUATED_PKGS, - plandesc.EVALUATED_OK] - - self.pd = plan - self.__update_avail_space() - - # make sure we init this even if we don't call solver - self.pd._new_avoid_obs = (self.image.avoid_set_get(), - self.image.avoid_set_get(implicit=True), - self.image.obsolete_set_get()) - - if self.pd.state == plandesc.UNEVALUATED: - self.image.linked.init_plan(plan) - return - - # figure out excludes - self.__new_excludes = self.image.list_excludes( - self.pd._new_variants, self.pd._new_facets) - - # tell the linked image subsystem about this plan - self.image.linked.setup_plan(plan) - - for pp in self.pd.pkg_plans: - pp.image = self.image - if pp.origin_fmri and pp.destination_fmri: - self.__target_update_count += 1 - elif pp.destination_fmri: - self.__target_install_count += 1 - elif pp.origin_fmri: - self.__target_removal_count += 1 - - def skip_preexecute(self): - assert self.pd.state in \ - [plandesc.PREEXECUTED_OK, plandesc.EVALUATED_OK], \ - "{0} not in [{1}, {2}]".format(self.pd.state, - plandesc.PREEXECUTED_OK, plandesc.EVALUATED_OK) - - if self.pd.state == plandesc.PREEXECUTED_OK: - # can't skip preexecute since we already preexecuted it - return - - if self.image.version != self.image.CURRENT_VERSION: - # Prevent plan execution if image format isn't current. 
- raise api_errors.ImageFormatUpdateNeeded( - self.image.root) - - if self.image.transport: - self.image.transport.shutdown() - - self.pd.state = plandesc.PREEXECUTED_OK - - @property - def state(self): - return self.pd.state - - @property - def planned_op(self): - """Returns a constant value indicating the type of operation - planned.""" - - return self.pd._op - - @property - def plan_desc(self): - """Get the proposed fmri changes.""" - return self.pd._fmri_changes - - def describe(self): - """Return a pointer to the plan description.""" - return self.pd - - @property - def bytes_added(self): - """get the (approx) number of bytes added""" - return self.pd._bytes_added - @property - def cbytes_added(self): - """get the (approx) number of bytes needed in download cache""" - return self.pd._cbytes_added - - @property - def bytes_avail(self): - """get the (approx) number of bytes space available""" - return self.pd._bytes_avail - @property - def cbytes_avail(self): - """get the (approx) number of download space available""" - return self.pd._cbytes_avail - - def __finish_plan(self, pdstate, fmri_changes=None): - """Private helper function that must be called at the end of - every planning operation to ensure final plan state is set and - any general post-plan work is performed.""" - - pd = self.pd - pd.state = pdstate - if not fmri_changes is None: - pd._fmri_changes = fmri_changes - - def __vector_2_fmri_changes(self, installed_dict, vector, - li_pkg_updates=True, new_variants=None, new_facets=None, - fmri_changes=None): - """Given an installed set of packages, and a proposed vector - of package changes determine what, if any, changes should be - made to the image. This takes into account different - behaviors during operations like variant changes, and updates - where the only packages being updated are linked image - constraints, etc.""" - - fmri_updates = [] - if fmri_changes is not None: - affected = [f[0] for f in fmri_changes] - else: - affected = None - - for a, b in ImagePlan.__dicts2fmrichanges(installed_dict, - ImagePlan.__fmris2dict(vector)): - if a != b: - fmri_updates.append((a, b)) - continue - - if (new_facets is not None or new_variants): - if affected is None or a in affected: - # If affected list of packages has not - # been predetermined for package fmris - # that are unchanged, or if the fmri - # exists in the list of affected - # packages, add it to the list. - fmri_updates.append((a, a)) - - # cache li_pkg_updates in the plan description for later - # evaluation - self.pd._li_pkg_updates = li_pkg_updates - - return fmri_updates - - def __plan_op(self): - """Private helper method used to mark the start of a planned - operation.""" - - self.pd._image_lm = self.image.get_last_modified(string=True) - - def __merge_inherited_facets(self, new_facets=None): - """Merge any new facets settings with (possibly changing) - inherited facets.""" - - if new_facets is not None: - # make sure we don't accidentally update the caller - # supplied facets. - new_facets = pkg.facet.Facets(new_facets) - - # we don't allow callers to specify inherited facets - # (they can only come from parent images.) - new_facets._clear_inherited() - - # get the existing image facets. - old_facets = self.image.cfg.facets - - if new_facets is None: - # the user did not request any facet changes, but we - # still need to see if inherited facets are changing. - # so set new_facets to the existing facet set with - # inherited facets removed. 
- new_facets = pkg.facet.Facets(old_facets) - new_facets._clear_inherited() - - # get the latest inherited facets and merge them into the user - # specified facets. - new_facets.update(self.image.linked.inherited_facets()) - - if new_facets == old_facets: - # there are no caller specified or inherited facet - # changes. - return (None, False, False) - - facet_change = bool(old_facets._cmp_values(new_facets)) or \ - bool(old_facets._cmp_priority(new_facets)) - masked_facet_change = bool(not facet_change) and \ - bool(old_facets._cmp_all_values(new_facets)) - - # Something better be changing. But if visible facets are - # changing we don't report masked facet changes. - assert facet_change != masked_facet_change - - return (new_facets, facet_change, masked_facet_change) - - def __evaluate_excludes(self, new_variants=None, new_facets=None, - dehydrate=None, rehydrate=None): - """Private helper function used to determine new facet and - variant state for image.""" - - # merge caller supplied and inherited facets - new_facets, facet_change, masked_facet_change = \ - self.__merge_inherited_facets(new_facets) - - # if we're changing variants or facets, save that to the plan. - if new_variants or facet_change or masked_facet_change: - self.pd._varcets_change = True - if new_variants: - # This particular data are passed as unicode - # instead of bytes in the child image due to the - # jsonrpclib update, so we use force_str here to - # reduce the pain in comparing json data type. - self.pd._new_variants = {} - for k, v in new_variants.items(): - self.pd._new_variants[misc.force_str(k)] = \ - misc.force_str(v) - else: - self.pd._new_variants = new_variants - self.pd._old_facets = self.image.cfg.facets - self.pd._new_facets = new_facets - self.pd._facet_change = facet_change - self.pd._masked_facet_change = masked_facet_change - - self.__new_excludes = self.image.list_excludes(new_variants, - new_facets) - - # Previously dehydrated publishers. - old_dehydrated = set(self.image.cfg.get_property("property", - "dehydrated")) - - # We only want to exclude all actions in the old image that - # belong to an already dehydrated publisher. - if old_dehydrated: - self.__old_excludes.append( - self.image.get_dehydrated_exclude_func( - old_dehydrated)) - - # Publishers to rehydrate - if rehydrate is None: - rehydrate = set() - rehydrate = set(rehydrate) - - # Publishers to dehydrate - if dehydrate is None: - dehydrate = set() - dehydrate = set(dehydrate) | (old_dehydrated - rehydrate) - - self.operations_pubs = sorted(dehydrate) - # Only allows actions in new image that cannot be dehydrated - # or that are in the dehydrate list and not in the rehydrate - # list. - if dehydrate: - self.__new_excludes.append( - self.image.get_dehydrated_exclude_func(dehydrate)) - - return (new_variants, new_facets, facet_change, - masked_facet_change) - - def __run_solver(self, solver_cb, retry_wo_parent_deps=True): - """Run the solver, and if it fails, optionally retry the - operation once while relaxing installed parent - dependencies.""" - - # have the solver try to satisfy parent dependencies. - ignore_inst_parent_deps = False - - # In some error cases, significant recursion may be required, - # and the default (1000) is not enough. In testing, this was - # found to be sufficient for the solver's needs. 
- prlimit = sys.getrecursionlimit() - if prlimit < 3000: - sys.setrecursionlimit(3000) - - try: - return solver_cb(ignore_inst_parent_deps) - except api_errors.PlanCreationException as e: - # if we're currently in sync don't retry the - # operation - if self.image.linked.insync(latest_md=False): - raise e - # if PKG_REQUIRE_SYNC is set in the - # environment we require an in-sync image. - if "PKG_REQUIRE_SYNC" in os.environ: - raise e - # caller doesn't want us to retry - if not retry_wo_parent_deps: - raise e - # we're an out-of-sync child image so retry - # this operation while ignoring parent - # dependencies for any installed packages. we - # do this so that users can manipulate out of - # sync images in an attempt to bring them back - # in sync. since we don't ignore parent - # dependencies for uninstalled packages, the - # user won't be able to take the image further - # out of sync. - ignore_inst_parent_deps = True - return solver_cb(ignore_inst_parent_deps) - finally: - # restore original recursion limit - sys.setrecursionlimit(prlimit) - - def __add_actuator(self, trigger_fmri, trigger_op, exec_op, values, - solver_inst, installed_dict): - """Add a single actuator to the solver 'solver_inst' and update - the plan. 'trigger_fmri' is pkg which triggered the operation - and is only used in the plan. 'trigger_op' is the name of the - operation which triggered the change, 'exec_op' is the name of - the operation which should be performed. - 'values' contains the fmris of the pkgs which should get - changed.""" - - if not isinstance(values, list): - values = [values] - - pub_ranks = self.image.get_publisher_ranks() - - matched_vals, unmatched = self.__match_user_fmris( - self.image, values, self.MATCH_INST_STEMS, - pub_ranks=pub_ranks, installed_pkgs=installed_dict, - raise_not_installed=False, - default_matcher=pkg.fmri.exact_name_match) - - triggered_fmris = set() - for m in matched_vals.values(): - triggered_fmris |= set(m) - - # Removals are done by stem so we have to make sure we only add - # removal FMRIs for versions which are actually installed. If - # the actuator specifies a version which is not installed, treat - # as nop. - # For updates, we have to remove versions which are already in - # the image because we don't want them in the proposed list for - # the solver. Otherwise we might trim on the installed version - # which prevents us from downgrading. - for t in triggered_fmris.copy(): - if (exec_op == pkgdefs.PKG_OP_UNINSTALL and - t not in installed_dict.values()) or \ - (exec_op != pkgdefs.PKG_OP_UNINSTALL - and t in installed_dict.values()): - triggered_fmris.remove(t) - continue - self.__pkg_actuators.add((trigger_fmri, t.pkg_name, - trigger_op, exec_op)) - - solver_inst.add_triggered_op(trigger_op, exec_op, - triggered_fmris) - - - def __decode_pkg_actuator_attrs(self, action, op): - """Read and decode pkg actuator data from action 'action'.""" - - # we ignore any non-supported operations - supported_exec_ops = [pkgdefs.PKG_OP_UPDATE, - pkgdefs.PKG_OP_UNINSTALL] - - if not action.attrs["name"].startswith("pkg.additional-"): - return - - # e.g.: set name=pkg.additional-update-on-uninstall value=... - try: - trigger_op = action.attrs["name"].split("-")[3] - exec_op = action.attrs["name"].split("-")[1] - except KeyError: - # Ignore invalid pkg actuators. - return - - if trigger_op != op or exec_op not in supported_exec_ops: - # Ignore unsupported pkg actuators. 
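As an aside, the attribute-name split used in __decode_pkg_actuator_attrs() above can be verified with a quick, self-contained example (illustrative attribute name only):

# set name=pkg.additional-update-on-uninstall value=... decodes as:
name = "pkg.additional-update-on-uninstall"
parts = name.split("-")   # ['pkg.additional', 'update', 'on', 'uninstall']
exec_op = parts[1]        # 'update'    - operation to perform
trigger_op = parts[3]     # 'uninstall' - operation that triggers it
print(trigger_op, exec_op)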
- return - - for f in action.attrlist("value"): - # Ignore values which are not valid FMRIs, we don't - # support globbing here. - try: - pkg.fmri.PkgFmri(f) - except pkg.fmri.IllegalFmri: - continue - yield (exec_op, f) - - def __set_pkg_actuators(self, patterns, op, solver_inst): - """Check the manifests for the pkgs specified by 'patterns' and - add them to the solver instance specified by 'solver_inst'. 'op' - defines the trigger operation which called this function.""" - - trigger_entries = {} - - ignore = DebugValues["ignore-pkg-actuators"] - if ignore and ignore.lower() == "true": - return - - # build installed dict - installed_dict = ImagePlan.__fmris2dict( - self.image.gen_installed_pkgs()) - pub_ranks = self.image.get_publisher_ranks() - - # Match only on installed stems. This makes sure no new pkgs - # will get installed when an update is specified. Note that - # this allows trailing matches (i.e. 'ambiguous') matches that - # may result in failure as the list of patterns are assumed to - # be from user input. - matched_vals, unmatched = self.__match_user_fmris( - self.image, patterns, self.MATCH_INST_VERSIONS, - pub_ranks=pub_ranks, installed_pkgs=installed_dict, - raise_not_installed=False) - - pfmris = set() - for m in matched_vals: - pfmris |= set(matched_vals[m]) - - for f in pfmris: - if not isinstance(f, pkg.fmri.PkgFmri): - f = pkg.fmri.PkgFmri(f) - for a in self.image.get_catalog( - self.image.IMG_CATALOG_INSTALLED).get_entry_actions( - f, [pkg.catalog.Catalog.SUMMARY]): - for exec_op, efmri in \ - self.__decode_pkg_actuator_attrs(a, op): - self.__add_actuator(f, op, - exec_op, efmri, solver_inst, - installed_dict) - - def __add_pkg_actuators_to_pd(self, user_pkgs): - """ Add pkg actuators to PlanDescription. Skip any changes which - would have been triggered by an actuator but were also requested - explicitly by the user to avoid confusion. """ - - for (tf, p, t, e) in self.__pkg_actuators: - for (before, after) in self.pd._fmri_changes: - if (before and before.pkg_name == p or - after and after.pkg_name == p) and \ - p not in user_pkgs: - self.pd.add_pkg_actuator(tf.pkg_name, e, - p) - - def __plan_install_solver(self, li_pkg_updates=True, li_sync_op=False, - new_facets=None, new_variants=None, pkgs_inst=None, - reject_list=misc.EmptyI, fmri_changes=None, exact_install=False): - """Use the solver to determine the fmri changes needed to - install the specified pkgs, sync the specified image, and/or - change facets/variants within the current image.""" - - # evaluate what varcet changes are required - new_variants, new_facets, \ - facet_change, masked_facet_change = \ - self.__evaluate_excludes(new_variants, new_facets) - - # check if we need to uninstall any packages. - uninstall = self.__any_reject_matches(reject_list) - - # check if anything is actually changing. - if not (li_sync_op or pkgs_inst or uninstall or - new_variants or facet_change or fmri_changes is not None): - # the solver is not necessary. 
- self.pd._fmri_changes = [] - return - - # get ranking of publishers - pub_ranks = self.image.get_publisher_ranks() - - # build installed dict - installed_dict = ImagePlan.__fmris2dict( - self.image.gen_installed_pkgs()) - - if reject_list: - reject_set = self.match_user_stems(self.image, - reject_list, self.MATCH_ALL) - else: - reject_set = set() - - if pkgs_inst: - inst_dict, references = self.__match_user_fmris( - self.image, pkgs_inst, self.MATCH_ALL, - pub_ranks=pub_ranks, installed_pkgs=installed_dict, - reject_set=reject_set) - self.__match_inst = references - else: - inst_dict = {} - - if new_variants: - variants = new_variants - else: - variants = self.image.get_variants() - - installed_dict_tmp = {} - # If exact_install is on, clear the installed_dict. - if exact_install: - installed_dict_tmp = installed_dict.copy() - installed_dict = {} - - def solver_cb(ignore_inst_parent_deps): - avoid_set = self.image.avoid_set_get() - frozen_list = self.image.get_frozen_list() - # If exact_install is on, ignore avoid_set and - # frozen_list. - if exact_install: - avoid_set = set() - frozen_list = [] - - # instantiate solver - solver = pkg_solver.PkgSolver( - self.image.get_catalog( - self.image.IMG_CATALOG_KNOWN), - installed_dict, - pub_ranks, - variants, - avoid_set, - self.image.linked.parent_fmris(), - self.__progtrack) - - if reject_list: - # use reject_list, not reject_set, to preserve - # input intent (e.g. 'pkg:/', '/' prefixes). - self.__set_pkg_actuators(reject_list, - pkgdefs.PKG_OP_UNINSTALL, solver) - - # run solver - new_vector, new_avoid_obs = \ - solver.solve_install( - frozen_list, - inst_dict, - new_variants=new_variants, - excludes=self.__new_excludes, - reject_set=reject_set, - trim_proposed_installed=False, - relax_all=li_sync_op, - ignore_inst_parent_deps=\ - ignore_inst_parent_deps, - exact_install=exact_install, - installed_dict_tmp=installed_dict_tmp) - - return solver, new_vector, new_avoid_obs - - # We can't retry this operation while ignoring parent - # dependencies if we're doing a linked image sync. - retry_wo_parent_deps = not li_sync_op - - # Solve; will raise exceptions if no solution is found. - solver, new_vector, self.pd._new_avoid_obs = \ - self.__run_solver(solver_cb, \ - retry_wo_parent_deps=retry_wo_parent_deps) - - # Restore the installed_dict for checking fmri changes. 
- if exact_install: - installed_dict = installed_dict_tmp.copy() - - self.pd._fmri_changes = self.__vector_2_fmri_changes( - installed_dict, new_vector, - li_pkg_updates=li_pkg_updates, - new_variants=new_variants, new_facets=new_facets, - fmri_changes=fmri_changes) - - self.__add_pkg_actuators_to_pd(reject_set) - - self.pd._solver_summary = str(solver) - if DebugValues["plan"]: - self.pd._solver_errors = solver.get_trim_errors() - - def __plan_install(self, li_pkg_updates=True, li_sync_op=False, - new_facets=None, new_variants=None, pkgs_inst=None, - reject_list=misc.EmptyI): - """Determine the fmri changes needed to install the specified - pkgs, sync the image, and/or change facets/variants within the - current image.""" - - self.__plan_op() - self.__plan_install_solver( - li_pkg_updates=li_pkg_updates, - li_sync_op=li_sync_op, - new_facets=new_facets, - new_variants=new_variants, - pkgs_inst=pkgs_inst, - reject_list=reject_list) - self.__finish_plan(plandesc.EVALUATED_PKGS) - - def __plan_exact_install(self, li_pkg_updates=True, li_sync_op=False, - new_facets=None, new_variants=None, pkgs_inst=None, - reject_list=misc.EmptyI): - """Determine the fmri changes needed to install exactly the - specified pkgs, sync the image, and/or change facets/variants - within the current image.""" - - self.__plan_op() - self.__plan_install_solver( - li_pkg_updates=li_pkg_updates, - li_sync_op=li_sync_op, - new_facets=new_facets, - new_variants=new_variants, - pkgs_inst=pkgs_inst, - reject_list=reject_list, - exact_install=True) - self.__finish_plan(plandesc.EVALUATED_PKGS) - - def set_be_options(self, backup_be, backup_be_name, new_be, - be_activate, be_name): - self.pd._backup_be = backup_be - self.pd._backup_be_name = backup_be_name - self.pd._new_be = new_be - self.pd._be_activate = be_activate - self.pd._be_name = be_name - - def __set_update_index(self, value): - self.pd._update_index = value - - def __get_update_index(self): - return self.pd._update_index - - update_index = property(__get_update_index, __set_update_index) - - def plan_install(self, pkgs_inst=None, reject_list=misc.EmptyI): - """Determine the fmri changes needed to install the specified - pkgs""" - - self.__plan_install(pkgs_inst=pkgs_inst, - reject_list=reject_list) - - def plan_exact_install(self, pkgs_inst=None, reject_list=misc.EmptyI): - """Determine the fmri changes needed to install exactly the - specified pkgs""" - - self.__plan_exact_install(pkgs_inst=pkgs_inst, - reject_list=reject_list) - - def __get_attr_fmri_changes(self, get_mattrs): - # Attempt to optimize package planning by determining which - # packages are actually affected by changing attributes (e.g., - # facets, variants). This also provides an accurate list of - # affected packages as a side effect (normally, all installed - # packages are seen as changed). This assumes that facets and - # variants are not both changing at the same time. - use_solver = False - cat = self.image.get_catalog( - self.image.IMG_CATALOG_INSTALLED) - cat_info = frozenset([cat.DEPENDENCY]) - - fmri_changes = [] - pt = self.__progtrack - rem_pkgs = self.image.count_installed_pkgs() - - pt.plan_start(pt.PLAN_PKGPLAN, goal=rem_pkgs) - for f in self.image.gen_installed_pkgs(): - m = self.image.get_manifest(f, - ignore_excludes=True) - - # Get the list of attributes involved in this operation - # that the package uses and that have changed. - use_solver, mattrs = get_mattrs(m, use_solver) - if not mattrs: - # Changed attributes unused. 
- pt.plan_add_progress(pt.PLAN_PKGPLAN) - rem_pkgs -= 1 - continue - - # Changed attributes are used in this package. - fmri_changes.append((f, f)) - - # If any dependency actions are tagged with one - # of the changed attributes, assume the solver - # must be used. - for act in cat.get_entry_actions(f, cat_info): - for attr in mattrs: - if use_solver: - break - if (act.name == "depend" and - attr in act.attrs): - use_solver = True - break - if use_solver: - break - - rem_pkgs -= 1 - pt.plan_add_progress(pt.PLAN_PKGPLAN) - - pt.plan_done(pt.PLAN_PKGPLAN) - pt.plan_all_done() - - return use_solver, fmri_changes - - def __facet_change_fastpath(self): - """The following optimizations only work correctly if only - facets are changing (not variants, uninstalls, etc).""" - - old_facets = self.pd._old_facets - new_facets = self.pd._new_facets - - # List of changed facets are those that have a new value, - # and those that have been removed. - changed_facets = [ - f - for f in new_facets - if f not in old_facets or \ - old_facets[f] != new_facets[f] - ] - changed_facets.extend( - f - for f in old_facets - if f not in new_facets + """ImagePlan object contains the plan for changing the image... + there are separate routines for planning the various types of + image modifying operations; evaluation (comparing manifests + and building lists of removal, install and update actions + and their execution is all common code""" + + MATCH_ALL = 0 + MATCH_INST_VERSIONS = 1 + MATCH_INST_STEMS = 2 + MATCH_UNINSTALLED = 3 + + def __init__( + self, image, op, progtrack, check_cancel, noexecute=False, pd=None + ): + self.image = image + self.__progtrack = progtrack + self.__check_cancel = check_cancel + self.__noexecute = noexecute + + # The set of processed target object directories known to be + # valid (those that are not symlinks and thus are valid for + # use during installation). This is used by the pkg.actions + # classes during install() operations. + self.valid_directories = set() + + # A place to keep info about saved_files; needed by file action. 
+ self.saved_files = {} + + self.__target_install_count = 0 + self.__target_update_count = 0 + self.__target_removal_count = 0 + + self.__directories = None # implement ref counting + self.__symlinks = None # for dirs and links and + self.__hardlinks = None # hardlinks + self.__exclude_re = None + self.__licenses = None + self.__legacy = None + self.__cached_actions = {} + self.__fixups = {} + self.operations_pubs = None # pubs being operated in hydrate + + self.invalid_meds = defaultdict(set) # targetless mediated links + self.__old_excludes = image.list_excludes() + self.__new_excludes = self.__old_excludes + + self.__preexecuted_indexing_error = None + self.__match_inst = {} # dict of fmri -> pattern + self.__match_rm = {} # dict of fmri -> pattern + self.__match_update = {} # dict of fmri -> pattern + + self.__pkg_actuators = set() + self._retrieved = set() + + self.pd = None + if pd is None: + pd = plandesc.PlanDescription(op) + assert pd._op == op + self.__setup_plan(pd) + + def __str__(self): + if self.pd.state == plandesc.UNEVALUATED: + s = "UNEVALUATED:\n" + return s + + s = "{0}\n".format(self.pd._solver_summary) + + if self.pd.state < plandesc.EVALUATED_PKGS: + return s + + s += "Package version changes:\n" + + for oldfmri, newfmri in self.pd._fmri_changes: + s += "{0} -> {1}\n".format(oldfmri, newfmri) + + if self.pd._actuators: + s = s + "\nActuators:\n{0}\n".format(self.pd._actuators) + + if self.__old_excludes != self.__new_excludes: + s += "\nVariants/Facet changes:\n {0}".format( + "\n".join(self.pd.get_varcets()) + ) + + if self.pd._mediators_change: + s = s + "\nMediator changes:\n {0}".format( + "\n".join(self.pd.get_mediators()) + ) + + return s + + def __setup_plan(self, plan): + assert plan.state in [ + plandesc.UNEVALUATED, + plandesc.EVALUATED_PKGS, + plandesc.EVALUATED_OK, + ] + + self.pd = plan + self.__update_avail_space() + + # make sure we init this even if we don't call solver + self.pd._new_avoid_obs = ( + self.image.avoid_set_get(), + self.image.avoid_set_get(implicit=True), + self.image.obsolete_set_get(), + ) + + if self.pd.state == plandesc.UNEVALUATED: + self.image.linked.init_plan(plan) + return + + # figure out excludes + self.__new_excludes = self.image.list_excludes( + self.pd._new_variants, self.pd._new_facets + ) + + # tell the linked image subsystem about this plan + self.image.linked.setup_plan(plan) + + for pp in self.pd.pkg_plans: + pp.image = self.image + if pp.origin_fmri and pp.destination_fmri: + self.__target_update_count += 1 + elif pp.destination_fmri: + self.__target_install_count += 1 + elif pp.origin_fmri: + self.__target_removal_count += 1 + + def skip_preexecute(self): + assert self.pd.state in [ + plandesc.PREEXECUTED_OK, + plandesc.EVALUATED_OK, + ], "{0} not in [{1}, {2}]".format( + self.pd.state, plandesc.PREEXECUTED_OK, plandesc.EVALUATED_OK + ) + + if self.pd.state == plandesc.PREEXECUTED_OK: + # can't skip preexecute since we already preexecuted it + return + + if self.image.version != self.image.CURRENT_VERSION: + # Prevent plan execution if image format isn't current. 
+ raise api_errors.ImageFormatUpdateNeeded(self.image.root) + + if self.image.transport: + self.image.transport.shutdown() + + self.pd.state = plandesc.PREEXECUTED_OK + + @property + def state(self): + return self.pd.state + + @property + def planned_op(self): + """Returns a constant value indicating the type of operation + planned.""" + + return self.pd._op + + @property + def plan_desc(self): + """Get the proposed fmri changes.""" + return self.pd._fmri_changes + + def describe(self): + """Return a pointer to the plan description.""" + return self.pd + + @property + def bytes_added(self): + """get the (approx) number of bytes added""" + return self.pd._bytes_added + + @property + def cbytes_added(self): + """get the (approx) number of bytes needed in download cache""" + return self.pd._cbytes_added + + @property + def bytes_avail(self): + """get the (approx) number of bytes space available""" + return self.pd._bytes_avail + + @property + def cbytes_avail(self): + """get the (approx) number of download space available""" + return self.pd._cbytes_avail + + def __finish_plan(self, pdstate, fmri_changes=None): + """Private helper function that must be called at the end of + every planning operation to ensure final plan state is set and + any general post-plan work is performed.""" + + pd = self.pd + pd.state = pdstate + if not fmri_changes is None: + pd._fmri_changes = fmri_changes + + def __vector_2_fmri_changes( + self, + installed_dict, + vector, + li_pkg_updates=True, + new_variants=None, + new_facets=None, + fmri_changes=None, + ): + """Given an installed set of packages, and a proposed vector + of package changes determine what, if any, changes should be + made to the image. This takes into account different + behaviors during operations like variant changes, and updates + where the only packages being updated are linked image + constraints, etc.""" + + fmri_updates = [] + if fmri_changes is not None: + affected = [f[0] for f in fmri_changes] + else: + affected = None + + for a, b in ImagePlan.__dicts2fmrichanges( + installed_dict, ImagePlan.__fmris2dict(vector) + ): + if a != b: + fmri_updates.append((a, b)) + continue + + if new_facets is not None or new_variants: + if affected is None or a in affected: + # If affected list of packages has not + # been predetermined for package fmris + # that are unchanged, or if the fmri + # exists in the list of affected + # packages, add it to the list. + fmri_updates.append((a, a)) + + # cache li_pkg_updates in the plan description for later + # evaluation + self.pd._li_pkg_updates = li_pkg_updates + + return fmri_updates + + def __plan_op(self): + """Private helper method used to mark the start of a planned + operation.""" + + self.pd._image_lm = self.image.get_last_modified(string=True) + + def __merge_inherited_facets(self, new_facets=None): + """Merge any new facets settings with (possibly changing) + inherited facets.""" + + if new_facets is not None: + # make sure we don't accidentally update the caller + # supplied facets. + new_facets = pkg.facet.Facets(new_facets) + + # we don't allow callers to specify inherited facets + # (they can only come from parent images.) + new_facets._clear_inherited() + + # get the existing image facets. + old_facets = self.image.cfg.facets + + if new_facets is None: + # the user did not request any facet changes, but we + # still need to see if inherited facets are changing. + # so set new_facets to the existing facet set with + # inherited facets removed. 
+ new_facets = pkg.facet.Facets(old_facets) + new_facets._clear_inherited() + + # get the latest inherited facets and merge them into the user + # specified facets. + new_facets.update(self.image.linked.inherited_facets()) + + if new_facets == old_facets: + # there are no caller specified or inherited facet + # changes. + return (None, False, False) + + facet_change = bool(old_facets._cmp_values(new_facets)) or bool( + old_facets._cmp_priority(new_facets) + ) + masked_facet_change = bool(not facet_change) and bool( + old_facets._cmp_all_values(new_facets) + ) + + # Something better be changing. But if visible facets are + # changing we don't report masked facet changes. + assert facet_change != masked_facet_change + + return (new_facets, facet_change, masked_facet_change) + + def __evaluate_excludes( + self, new_variants=None, new_facets=None, dehydrate=None, rehydrate=None + ): + """Private helper function used to determine new facet and + variant state for image.""" + + # merge caller supplied and inherited facets + ( + new_facets, + facet_change, + masked_facet_change, + ) = self.__merge_inherited_facets(new_facets) + + # if we're changing variants or facets, save that to the plan. + if new_variants or facet_change or masked_facet_change: + self.pd._varcets_change = True + if new_variants: + # This particular data are passed as unicode + # instead of bytes in the child image due to the + # jsonrpclib update, so we use force_str here to + # reduce the pain in comparing json data type. + self.pd._new_variants = {} + for k, v in new_variants.items(): + self.pd._new_variants[misc.force_str(k)] = misc.force_str(v) + else: + self.pd._new_variants = new_variants + self.pd._old_facets = self.image.cfg.facets + self.pd._new_facets = new_facets + self.pd._facet_change = facet_change + self.pd._masked_facet_change = masked_facet_change + + self.__new_excludes = self.image.list_excludes(new_variants, new_facets) + + # Previously dehydrated publishers. + old_dehydrated = set( + self.image.cfg.get_property("property", "dehydrated") + ) + + # We only want to exclude all actions in the old image that + # belong to an already dehydrated publisher. + if old_dehydrated: + self.__old_excludes.append( + self.image.get_dehydrated_exclude_func(old_dehydrated) + ) + + # Publishers to rehydrate + if rehydrate is None: + rehydrate = set() + rehydrate = set(rehydrate) + + # Publishers to dehydrate + if dehydrate is None: + dehydrate = set() + dehydrate = set(dehydrate) | (old_dehydrated - rehydrate) + + self.operations_pubs = sorted(dehydrate) + # Only allows actions in new image that cannot be dehydrated + # or that are in the dehydrate list and not in the rehydrate + # list. + if dehydrate: + self.__new_excludes.append( + self.image.get_dehydrated_exclude_func(dehydrate) + ) + + return (new_variants, new_facets, facet_change, masked_facet_change) + + def __run_solver(self, solver_cb, retry_wo_parent_deps=True): + """Run the solver, and if it fails, optionally retry the + operation once while relaxing installed parent + dependencies.""" + + # have the solver try to satisfy parent dependencies. + ignore_inst_parent_deps = False + + # In some error cases, significant recursion may be required, + # and the default (1000) is not enough. In testing, this was + # found to be sufficient for the solver's needs. 
+ prlimit = sys.getrecursionlimit() + if prlimit < 3000: + sys.setrecursionlimit(3000) + + try: + return solver_cb(ignore_inst_parent_deps) + except api_errors.PlanCreationException as e: + # if we're currently in sync don't retry the + # operation + if self.image.linked.insync(latest_md=False): + raise e + # if PKG_REQUIRE_SYNC is set in the + # environment we require an in-sync image. + if "PKG_REQUIRE_SYNC" in os.environ: + raise e + # caller doesn't want us to retry + if not retry_wo_parent_deps: + raise e + # we're an out-of-sync child image so retry + # this operation while ignoring parent + # dependencies for any installed packages. we + # do this so that users can manipulate out of + # sync images in an attempt to bring them back + # in sync. since we don't ignore parent + # dependencies for uninstalled packages, the + # user won't be able to take the image further + # out of sync. + ignore_inst_parent_deps = True + return solver_cb(ignore_inst_parent_deps) + finally: + # restore original recursion limit + sys.setrecursionlimit(prlimit) + + def __add_actuator( + self, + trigger_fmri, + trigger_op, + exec_op, + values, + solver_inst, + installed_dict, + ): + """Add a single actuator to the solver 'solver_inst' and update + the plan. 'trigger_fmri' is pkg which triggered the operation + and is only used in the plan. 'trigger_op' is the name of the + operation which triggered the change, 'exec_op' is the name of + the operation which should be performed. + 'values' contains the fmris of the pkgs which should get + changed.""" + + if not isinstance(values, list): + values = [values] + + pub_ranks = self.image.get_publisher_ranks() + + matched_vals, unmatched = self.__match_user_fmris( + self.image, + values, + self.MATCH_INST_STEMS, + pub_ranks=pub_ranks, + installed_pkgs=installed_dict, + raise_not_installed=False, + default_matcher=pkg.fmri.exact_name_match, + ) + + triggered_fmris = set() + for m in matched_vals.values(): + triggered_fmris |= set(m) + + # Removals are done by stem so we have to make sure we only add + # removal FMRIs for versions which are actually installed. If + # the actuator specifies a version which is not installed, treat + # as nop. + # For updates, we have to remove versions which are already in + # the image because we don't want them in the proposed list for + # the solver. Otherwise we might trim on the installed version + # which prevents us from downgrading. + for t in triggered_fmris.copy(): + if ( + exec_op == pkgdefs.PKG_OP_UNINSTALL + and t not in installed_dict.values() + ) or ( + exec_op != pkgdefs.PKG_OP_UNINSTALL + and t in installed_dict.values() + ): + triggered_fmris.remove(t) + continue + self.__pkg_actuators.add( + (trigger_fmri, t.pkg_name, trigger_op, exec_op) + ) + + solver_inst.add_triggered_op(trigger_op, exec_op, triggered_fmris) + + def __decode_pkg_actuator_attrs(self, action, op): + """Read and decode pkg actuator data from action 'action'.""" + + # we ignore any non-supported operations + supported_exec_ops = [pkgdefs.PKG_OP_UPDATE, pkgdefs.PKG_OP_UNINSTALL] + + if not action.attrs["name"].startswith("pkg.additional-"): + return + + # e.g.: set name=pkg.additional-update-on-uninstall value=... + try: + trigger_op = action.attrs["name"].split("-")[3] + exec_op = action.attrs["name"].split("-")[1] + except KeyError: + # Ignore invalid pkg actuators. + return + + if trigger_op != op or exec_op not in supported_exec_ops: + # Ignore unsupported pkg actuators. 
+ return + + for f in action.attrlist("value"): + # Ignore values which are not valid FMRIs, we don't + # support globbing here. + try: + pkg.fmri.PkgFmri(f) + except pkg.fmri.IllegalFmri: + continue + yield (exec_op, f) + + def __set_pkg_actuators(self, patterns, op, solver_inst): + """Check the manifests for the pkgs specified by 'patterns' and + add them to the solver instance specified by 'solver_inst'. 'op' + defines the trigger operation which called this function.""" + + trigger_entries = {} + + ignore = DebugValues["ignore-pkg-actuators"] + if ignore and ignore.lower() == "true": + return + + # build installed dict + installed_dict = ImagePlan.__fmris2dict(self.image.gen_installed_pkgs()) + pub_ranks = self.image.get_publisher_ranks() + + # Match only on installed stems. This makes sure no new pkgs + # will get installed when an update is specified. Note that + # this allows trailing matches (i.e. 'ambiguous') matches that + # may result in failure as the list of patterns are assumed to + # be from user input. + matched_vals, unmatched = self.__match_user_fmris( + self.image, + patterns, + self.MATCH_INST_VERSIONS, + pub_ranks=pub_ranks, + installed_pkgs=installed_dict, + raise_not_installed=False, + ) + + pfmris = set() + for m in matched_vals: + pfmris |= set(matched_vals[m]) + + for f in pfmris: + if not isinstance(f, pkg.fmri.PkgFmri): + f = pkg.fmri.PkgFmri(f) + for a in self.image.get_catalog( + self.image.IMG_CATALOG_INSTALLED + ).get_entry_actions(f, [pkg.catalog.Catalog.SUMMARY]): + for exec_op, efmri in self.__decode_pkg_actuator_attrs(a, op): + self.__add_actuator( + f, op, exec_op, efmri, solver_inst, installed_dict + ) + + def __add_pkg_actuators_to_pd(self, user_pkgs): + """Add pkg actuators to PlanDescription. Skip any changes which + would have been triggered by an actuator but were also requested + explicitly by the user to avoid confusion.""" + + for tf, p, t, e in self.__pkg_actuators: + for before, after in self.pd._fmri_changes: + if ( + before + and before.pkg_name == p + or after + and after.pkg_name == p + ) and p not in user_pkgs: + self.pd.add_pkg_actuator(tf.pkg_name, e, p) + + def __plan_install_solver( + self, + li_pkg_updates=True, + li_sync_op=False, + new_facets=None, + new_variants=None, + pkgs_inst=None, + reject_list=misc.EmptyI, + fmri_changes=None, + exact_install=False, + ): + """Use the solver to determine the fmri changes needed to + install the specified pkgs, sync the specified image, and/or + change facets/variants within the current image.""" + + # evaluate what varcet changes are required + ( + new_variants, + new_facets, + facet_change, + masked_facet_change, + ) = self.__evaluate_excludes(new_variants, new_facets) + + # check if we need to uninstall any packages. + uninstall = self.__any_reject_matches(reject_list) + + # check if anything is actually changing. + if not ( + li_sync_op + or pkgs_inst + or uninstall + or new_variants + or facet_change + or fmri_changes is not None + ): + # the solver is not necessary. 
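[Editor's note — illustrative only, not part of this change.] The attribute name parsed by __decode_pkg_actuator_attrs above follows the pattern pkg.additional-<exec-op>-on-<trigger-op>; the hypothetical snippet below shows how such a name decodes under the split used in the code. It may also be worth noting that a malformed name with too few components would raise IndexError from the indexing rather than the KeyError that is caught.

# Hypothetical manifest action (package name is made up for illustration):
#   set name=pkg.additional-update-on-uninstall value=pkg:/library/foo
name = "pkg.additional-update-on-uninstall"
parts = name.split("-")   # ["pkg.additional", "update", "on", "uninstall"]
exec_op = parts[1]        # "update"    - operation to perform
trigger_op = parts[3]     # "uninstall" - operation that triggers it
assert (exec_op, trigger_op) == ("update", "uninstall")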
+ self.pd._fmri_changes = [] + return + + # get ranking of publishers + pub_ranks = self.image.get_publisher_ranks() + + # build installed dict + installed_dict = ImagePlan.__fmris2dict(self.image.gen_installed_pkgs()) + + if reject_list: + reject_set = self.match_user_stems( + self.image, reject_list, self.MATCH_ALL + ) + else: + reject_set = set() + + if pkgs_inst: + inst_dict, references = self.__match_user_fmris( + self.image, + pkgs_inst, + self.MATCH_ALL, + pub_ranks=pub_ranks, + installed_pkgs=installed_dict, + reject_set=reject_set, + ) + self.__match_inst = references + else: + inst_dict = {} + + if new_variants: + variants = new_variants + else: + variants = self.image.get_variants() + + installed_dict_tmp = {} + # If exact_install is on, clear the installed_dict. + if exact_install: + installed_dict_tmp = installed_dict.copy() + installed_dict = {} + + def solver_cb(ignore_inst_parent_deps): + avoid_set = self.image.avoid_set_get() + frozen_list = self.image.get_frozen_list() + # If exact_install is on, ignore avoid_set and + # frozen_list. + if exact_install: + avoid_set = set() + frozen_list = [] + + # instantiate solver + solver = pkg_solver.PkgSolver( + self.image.get_catalog(self.image.IMG_CATALOG_KNOWN), + installed_dict, + pub_ranks, + variants, + avoid_set, + self.image.linked.parent_fmris(), + self.__progtrack, + ) + + if reject_list: + # use reject_list, not reject_set, to preserve + # input intent (e.g. 'pkg:/', '/' prefixes). + self.__set_pkg_actuators( + reject_list, pkgdefs.PKG_OP_UNINSTALL, solver ) - def get_fattrs(m, use_solver): - # Get the list of facets involved in this - # operation that the package uses. To - # accurately determine which packages are - # actually being changed, we must compare the - # old effective value for each facet that is - # changing with its new effective value. - return use_solver, list( - f - for f in m.gen_facets( - excludes=self.__new_excludes, - patterns=changed_facets) - if new_facets[f] != old_facets[f] - ) - - return self.__get_attr_fmri_changes(get_fattrs) - - def __variant_change_fastpath(self): - """The following optimizations only work correctly if only - variants are changing (not facets, uninstalls, etc).""" - - nvariants = self.pd._new_variants - - def get_vattrs(m, use_solver): - # Get the list of variants involved in this - # operation that the package uses. - mvars = [] - for (variant, pvals) in m.gen_variants( - excludes=self.__new_excludes, - patterns=nvariants - ): - if nvariants[variant] not in pvals: - # If the new value for the - # variant is unsupported by this - # package, then the solver - # should be triggered so the - # package can be removed. - use_solver = True - mvars.append(variant) - return use_solver, mvars - - return self.__get_attr_fmri_changes(get_vattrs) - - def __get_publishers_with_repos(self, publishers=misc.EmptyI): - """Return publishers that have repositories configured. - - 'publishers' is an optional list of publisher prefixes to - limit the returned results to. 
- - A PlanCreationException will be raised if any of the publishers - specified do not exist, if any of the specified publishers have - no configured repositories, or if all known publishers have - no configured repositories.""" - - all_pubs = [ p.prefix for p in self.image.gen_publishers() ] - if not publishers: - if all_pubs: - publishers = all_pubs - else: - return misc.EmptyI - - configured_pubs = [ - pub.prefix - for pub in self.image.gen_publishers() - if pub.prefix in publishers and \ - (pub.repository and pub.repository.origins) - ] - - unconfigured_pubs = set(publishers) - set(configured_pubs) - if unconfigured_pubs: - raise api_errors.PlanCreationException( - no_repo_pubs=unconfigured_pubs) - - return configured_pubs - - def __plan_common_hydration(self, publishers, dehydrate=False): - self.__plan_op() - - # get publishers to dehydrate or rehydrate - pubs = self.__get_publishers_with_repos(publishers=publishers) - - if not pubs: - # Nothing to do. - self.__finish_plan(plandesc.EVALUATED_PKGS) - return - - # List of packages that will be modified. - fmri_changes = [ - (f, f) - for f in self.image.gen_installed_pkgs(pubs=pubs) - ] - - # Evaluate current facets / variants. - if dehydrate: - self.__evaluate_excludes(dehydrate=pubs) - else: - self.__evaluate_excludes(rehydrate=pubs) - - # If solver isn't involved, assume the list of packages - # has been determined. - assert fmri_changes is not None - self.__finish_plan(plandesc.EVALUATED_PKGS, - fmri_changes=fmri_changes) - - def plan_dehydrate(self, publishers=None): - """Dehydrate packages for given publishers. If no publishers - are specified, packages for all publishers with configured - repositories will be dehydrated.""" - - self.__plan_common_hydration(publishers, dehydrate=True) - - def plan_rehydrate(self, publishers=None): - """Rehydrate packages for given publishers. If no publishers - are specified, packages for all dehydrated publishers with - configured repositories will be rehydrated.""" - - self.__plan_common_hydration(publishers) - - def plan_change_varcets(self, new_facets=None, new_variants=None, - reject_list=misc.EmptyI): - """Determine the fmri changes needed to change the specified - facets/variants.""" - - self.__plan_op() - - # assume none of our optimizations will work. - fmri_changes = None - - # convenience function to invoke the solver. - def plan_install_solver(): - self.__plan_install_solver( - new_facets=new_facets, - new_variants=new_variants, - reject_list=reject_list, - fmri_changes=fmri_changes) - self.__finish_plan(plandesc.EVALUATED_PKGS) - - # evaluate what varcet changes are required - new_variants, new_facets, \ - facet_change, masked_facet_change = \ - self.__evaluate_excludes(new_variants, new_facets) - - # uninstalling packages requires the solver. - uninstall = self.__any_reject_matches(reject_list) - if uninstall: - plan_install_solver() - return - - # All operations (including varcet changes) need to try and - # keep linked images in sync. Linked image audits are fast, - # so do one now and if we're not in sync we need to invoke the - # solver. - if not self.image.linked.insync(): - plan_install_solver() - return - - # if facets and variants are changing at the same time, then - # we need to invoke the solver. - if new_variants and facet_change: - plan_install_solver() - return - - # By default, we assume the solver must be used. If any of the - # optimizations below can be applied, they'll determine whether - # the solver can be used. 
- use_solver = True - - # the following facet optimization only works if we're not - # changing variants at the same time. - if facet_change: - assert not new_variants - use_solver, fmri_changes = \ - self.__facet_change_fastpath() - - # the following variant optimization only works if we're not - # changing facets at the same time. - if new_variants: - assert not facet_change - use_solver, fmri_changes = \ - self.__variant_change_fastpath() - + # run solver + new_vector, new_avoid_obs = solver.solve_install( + frozen_list, + inst_dict, + new_variants=new_variants, + excludes=self.__new_excludes, + reject_set=reject_set, + trim_proposed_installed=False, + relax_all=li_sync_op, + ignore_inst_parent_deps=ignore_inst_parent_deps, + exact_install=exact_install, + installed_dict_tmp=installed_dict_tmp, + ) + + return solver, new_vector, new_avoid_obs + + # We can't retry this operation while ignoring parent + # dependencies if we're doing a linked image sync. + retry_wo_parent_deps = not li_sync_op + + # Solve; will raise exceptions if no solution is found. + solver, new_vector, self.pd._new_avoid_obs = self.__run_solver( + solver_cb, retry_wo_parent_deps=retry_wo_parent_deps + ) + + # Restore the installed_dict for checking fmri changes. + if exact_install: + installed_dict = installed_dict_tmp.copy() + + self.pd._fmri_changes = self.__vector_2_fmri_changes( + installed_dict, + new_vector, + li_pkg_updates=li_pkg_updates, + new_variants=new_variants, + new_facets=new_facets, + fmri_changes=fmri_changes, + ) + + self.__add_pkg_actuators_to_pd(reject_set) + + self.pd._solver_summary = str(solver) + if DebugValues["plan"]: + self.pd._solver_errors = solver.get_trim_errors() + + def __plan_install( + self, + li_pkg_updates=True, + li_sync_op=False, + new_facets=None, + new_variants=None, + pkgs_inst=None, + reject_list=misc.EmptyI, + ): + """Determine the fmri changes needed to install the specified + pkgs, sync the image, and/or change facets/variants within the + current image.""" + + self.__plan_op() + self.__plan_install_solver( + li_pkg_updates=li_pkg_updates, + li_sync_op=li_sync_op, + new_facets=new_facets, + new_variants=new_variants, + pkgs_inst=pkgs_inst, + reject_list=reject_list, + ) + self.__finish_plan(plandesc.EVALUATED_PKGS) + + def __plan_exact_install( + self, + li_pkg_updates=True, + li_sync_op=False, + new_facets=None, + new_variants=None, + pkgs_inst=None, + reject_list=misc.EmptyI, + ): + """Determine the fmri changes needed to install exactly the + specified pkgs, sync the image, and/or change facets/variants + within the current image.""" + + self.__plan_op() + self.__plan_install_solver( + li_pkg_updates=li_pkg_updates, + li_sync_op=li_sync_op, + new_facets=new_facets, + new_variants=new_variants, + pkgs_inst=pkgs_inst, + reject_list=reject_list, + exact_install=True, + ) + self.__finish_plan(plandesc.EVALUATED_PKGS) + + def set_be_options( + self, backup_be, backup_be_name, new_be, be_activate, be_name + ): + self.pd._backup_be = backup_be + self.pd._backup_be_name = backup_be_name + self.pd._new_be = new_be + self.pd._be_activate = be_activate + self.pd._be_name = be_name + + def __set_update_index(self, value): + self.pd._update_index = value + + def __get_update_index(self): + return self.pd._update_index + + update_index = property(__get_update_index, __set_update_index) + + def plan_install(self, pkgs_inst=None, reject_list=misc.EmptyI): + """Determine the fmri changes needed to install the specified + pkgs""" + + self.__plan_install(pkgs_inst=pkgs_inst, 
reject_list=reject_list) + + def plan_exact_install(self, pkgs_inst=None, reject_list=misc.EmptyI): + """Determine the fmri changes needed to install exactly the + specified pkgs""" + + self.__plan_exact_install(pkgs_inst=pkgs_inst, reject_list=reject_list) + + def __get_attr_fmri_changes(self, get_mattrs): + # Attempt to optimize package planning by determining which + # packages are actually affected by changing attributes (e.g., + # facets, variants). This also provides an accurate list of + # affected packages as a side effect (normally, all installed + # packages are seen as changed). This assumes that facets and + # variants are not both changing at the same time. + use_solver = False + cat = self.image.get_catalog(self.image.IMG_CATALOG_INSTALLED) + cat_info = frozenset([cat.DEPENDENCY]) + + fmri_changes = [] + pt = self.__progtrack + rem_pkgs = self.image.count_installed_pkgs() + + pt.plan_start(pt.PLAN_PKGPLAN, goal=rem_pkgs) + for f in self.image.gen_installed_pkgs(): + m = self.image.get_manifest(f, ignore_excludes=True) + + # Get the list of attributes involved in this operation + # that the package uses and that have changed. + use_solver, mattrs = get_mattrs(m, use_solver) + if not mattrs: + # Changed attributes unused. + pt.plan_add_progress(pt.PLAN_PKGPLAN) + rem_pkgs -= 1 + continue + + # Changed attributes are used in this package. + fmri_changes.append((f, f)) + + # If any dependency actions are tagged with one + # of the changed attributes, assume the solver + # must be used. + for act in cat.get_entry_actions(f, cat_info): + for attr in mattrs: + if use_solver: + break + if act.name == "depend" and attr in act.attrs: + use_solver = True + break if use_solver: - plan_install_solver() - return - - # If solver isn't involved, assume the list of packages - # has been determined. - assert fmri_changes is not None - self.__finish_plan(plandesc.EVALUATED_PKGS, - fmri_changes=fmri_changes) - - - def plan_set_mediators(self, new_mediators): - """Determine the changes needed to set the specified mediators. - - 'new_mediators' is a dict of dicts of the mediators to set - version and implementation for. It should be of the form: - - { - mediator-name: { - "implementation": mediator-implementation-string, - "version": mediator-version-string - } - } - - 'implementation' is an optional string that specifies the - implementation of the mediator for use in addition to or - instead of 'version'. A value of None will be interpreted - as requesting a reset of implementation to its optimal - default. - - 'version' is an optional string that specifies the version - (expressed as a dot-separated sequence of non-negative - integers) of the mediator for use. A value of None will be - interpreted as requesting a reset of version to its optimal - default. - """ - - self.__plan_op() - self.__evaluate_excludes() - - self.pd._mediators_change = True - self.pd._new_mediators = new_mediators - cfg_mediators = self.image.cfg.mediators - - pt = self.__progtrack - - pt.plan_start(pt.PLAN_MEDIATION_CHG) - # keys() is used since entries are deleted during iteration. - update_mediators = {} - for m in list(self.pd._new_mediators.keys()): - pt.plan_add_progress(pt.PLAN_MEDIATION_CHG) - for k in ("implementation", "version"): - if k in self.pd._new_mediators[m]: - if self.pd._new_mediators[m][k] is not None: - # Any mediators being set this - # way are forced to be marked as - # being set by local administrator. 
- self.pd._new_mediators[m]["{0}-source".format(k)] = \ - "local" - continue - - # Explicit reset requested. - del self.pd._new_mediators[m][k] - self.pd._new_mediators[m].pop( - "{0}-source".format(k), None) - if k == "implementation": - self.pd._new_mediators[m].pop( - "implementation-version", - None) - continue - - if m not in cfg_mediators: - # Nothing to do if not previously - # configured. - continue - - # If the mediator was configured by the local - # administrator, merge existing configuration. - # This is necessary since callers are only - # required to specify the components they want - # to change. - med_source = cfg_mediators[m].get("{0}-source".format(k)) - if med_source != "local": - continue - - self.pd._new_mediators[m][k] = \ - cfg_mediators[m].get(k) - self.pd._new_mediators[m]["{0}-source".format(k)] = "local" - - if k == "implementation" and \ - "implementation-version" in cfg_mediators[m]: - self.pd._new_mediators[m]["implementation-version"] = \ - cfg_mediators[m].get("implementation-version") - - if m not in cfg_mediators: - # mediation changed. - continue + break + + rem_pkgs -= 1 + pt.plan_add_progress(pt.PLAN_PKGPLAN) + + pt.plan_done(pt.PLAN_PKGPLAN) + pt.plan_all_done() + + return use_solver, fmri_changes + + def __facet_change_fastpath(self): + """The following optimizations only work correctly if only + facets are changing (not variants, uninstalls, etc).""" + + old_facets = self.pd._old_facets + new_facets = self.pd._new_facets + + # List of changed facets are those that have a new value, + # and those that have been removed. + changed_facets = [ + f + for f in new_facets + if f not in old_facets or old_facets[f] != new_facets[f] + ] + changed_facets.extend(f for f in old_facets if f not in new_facets) + + def get_fattrs(m, use_solver): + # Get the list of facets involved in this + # operation that the package uses. To + # accurately determine which packages are + # actually being changed, we must compare the + # old effective value for each facet that is + # changing with its new effective value. + return use_solver, list( + f + for f in m.gen_facets( + excludes=self.__new_excludes, patterns=changed_facets + ) + if new_facets[f] != old_facets[f] + ) + + return self.__get_attr_fmri_changes(get_fattrs) + + def __variant_change_fastpath(self): + """The following optimizations only work correctly if only + variants are changing (not facets, uninstalls, etc).""" + + nvariants = self.pd._new_variants + + def get_vattrs(m, use_solver): + # Get the list of variants involved in this + # operation that the package uses. + mvars = [] + for variant, pvals in m.gen_variants( + excludes=self.__new_excludes, patterns=nvariants + ): + if nvariants[variant] not in pvals: + # If the new value for the + # variant is unsupported by this + # package, then the solver + # should be triggered so the + # package can be removed. + use_solver = True + mvars.append(variant) + return use_solver, mvars + + return self.__get_attr_fmri_changes(get_vattrs) + + def __get_publishers_with_repos(self, publishers=misc.EmptyI): + """Return publishers that have repositories configured. + + 'publishers' is an optional list of publisher prefixes to + limit the returned results to. 
+ + A PlanCreationException will be raised if any of the publishers + specified do not exist, if any of the specified publishers have + no configured repositories, or if all known publishers have + no configured repositories.""" + + all_pubs = [p.prefix for p in self.image.gen_publishers()] + if not publishers: + if all_pubs: + publishers = all_pubs + else: + return misc.EmptyI + + configured_pubs = [ + pub.prefix + for pub in self.image.gen_publishers() + if pub.prefix in publishers + and (pub.repository and pub.repository.origins) + ] + + unconfigured_pubs = set(publishers) - set(configured_pubs) + if unconfigured_pubs: + raise api_errors.PlanCreationException( + no_repo_pubs=unconfigured_pubs + ) + + return configured_pubs + + def __plan_common_hydration(self, publishers, dehydrate=False): + self.__plan_op() + + # get publishers to dehydrate or rehydrate + pubs = self.__get_publishers_with_repos(publishers=publishers) + + if not pubs: + # Nothing to do. + self.__finish_plan(plandesc.EVALUATED_PKGS) + return + + # List of packages that will be modified. + fmri_changes = [ + (f, f) for f in self.image.gen_installed_pkgs(pubs=pubs) + ] + + # Evaluate current facets / variants. + if dehydrate: + self.__evaluate_excludes(dehydrate=pubs) + else: + self.__evaluate_excludes(rehydrate=pubs) + + # If solver isn't involved, assume the list of packages + # has been determined. + assert fmri_changes is not None + self.__finish_plan(plandesc.EVALUATED_PKGS, fmri_changes=fmri_changes) + + def plan_dehydrate(self, publishers=None): + """Dehydrate packages for given publishers. If no publishers + are specified, packages for all publishers with configured + repositories will be dehydrated.""" + + self.__plan_common_hydration(publishers, dehydrate=True) + + def plan_rehydrate(self, publishers=None): + """Rehydrate packages for given publishers. If no publishers + are specified, packages for all dehydrated publishers with + configured repositories will be rehydrated.""" + + self.__plan_common_hydration(publishers) + + def plan_change_varcets( + self, new_facets=None, new_variants=None, reject_list=misc.EmptyI + ): + """Determine the fmri changes needed to change the specified + facets/variants.""" + + self.__plan_op() + + # assume none of our optimizations will work. + fmri_changes = None + + # convenience function to invoke the solver. + def plan_install_solver(): + self.__plan_install_solver( + new_facets=new_facets, + new_variants=new_variants, + reject_list=reject_list, + fmri_changes=fmri_changes, + ) + self.__finish_plan(plandesc.EVALUATED_PKGS) + + # evaluate what varcet changes are required + ( + new_variants, + new_facets, + facet_change, + masked_facet_change, + ) = self.__evaluate_excludes(new_variants, new_facets) + + # uninstalling packages requires the solver. + uninstall = self.__any_reject_matches(reject_list) + if uninstall: + plan_install_solver() + return + + # All operations (including varcet changes) need to try and + # keep linked images in sync. Linked image audits are fast, + # so do one now and if we're not in sync we need to invoke the + # solver. + if not self.image.linked.insync(): + plan_install_solver() + return + + # if facets and variants are changing at the same time, then + # we need to invoke the solver. + if new_variants and facet_change: + plan_install_solver() + return + + # By default, we assume the solver must be used. If any of the + # optimizations below can be applied, they'll determine whether + # the solver can be used. 
+ use_solver = True + + # the following facet optimization only works if we're not + # changing variants at the same time. + if facet_change: + assert not new_variants + use_solver, fmri_changes = self.__facet_change_fastpath() + + # the following variant optimization only works if we're not + # changing facets at the same time. + if new_variants: + assert not facet_change + use_solver, fmri_changes = self.__variant_change_fastpath() + + if use_solver: + plan_install_solver() + return + + # If solver isn't involved, assume the list of packages + # has been determined. + assert fmri_changes is not None + self.__finish_plan(plandesc.EVALUATED_PKGS, fmri_changes=fmri_changes) + + def plan_set_mediators(self, new_mediators): + """Determine the changes needed to set the specified mediators. + + 'new_mediators' is a dict of dicts of the mediators to set + version and implementation for. It should be of the form: + + { + mediator-name: { + "implementation": mediator-implementation-string, + "version": mediator-version-string + } + } + + 'implementation' is an optional string that specifies the + implementation of the mediator for use in addition to or + instead of 'version'. A value of None will be interpreted + as requesting a reset of implementation to its optimal + default. + + 'version' is an optional string that specifies the version + (expressed as a dot-separated sequence of non-negative + integers) of the mediator for use. A value of None will be + interpreted as requesting a reset of version to its optimal + default. + """ + + self.__plan_op() + self.__evaluate_excludes() + + self.pd._mediators_change = True + self.pd._new_mediators = new_mediators + cfg_mediators = self.image.cfg.mediators + + pt = self.__progtrack + + pt.plan_start(pt.PLAN_MEDIATION_CHG) + # keys() is used since entries are deleted during iteration. + update_mediators = {} + for m in list(self.pd._new_mediators.keys()): + pt.plan_add_progress(pt.PLAN_MEDIATION_CHG) + for k in ("implementation", "version"): + if k in self.pd._new_mediators[m]: + if self.pd._new_mediators[m][k] is not None: + # Any mediators being set this + # way are forced to be marked as + # being set by local administrator. + self.pd._new_mediators[m][ + "{0}-source".format(k) + ] = "local" + continue + + # Explicit reset requested. + del self.pd._new_mediators[m][k] + self.pd._new_mediators[m].pop("{0}-source".format(k), None) + if k == "implementation": + self.pd._new_mediators[m].pop( + "implementation-version", None + ) + continue + + if m not in cfg_mediators: + # Nothing to do if not previously + # configured. + continue + + # If the mediator was configured by the local + # administrator, merge existing configuration. + # This is necessary since callers are only + # required to specify the components they want + # to change. + med_source = cfg_mediators[m].get("{0}-source".format(k)) + if med_source != "local": + continue + + self.pd._new_mediators[m][k] = cfg_mediators[m].get(k) + self.pd._new_mediators[m]["{0}-source".format(k)] = "local" + + if ( + k == "implementation" + and "implementation-version" in cfg_mediators[m] + ): + self.pd._new_mediators[m][ + "implementation-version" + ] = cfg_mediators[m].get("implementation-version") + + if m not in cfg_mediators: + # mediation changed. + continue + + # Determine if the only thing changing for mediations is + # whether configuration source is changing. If so, + # optimize planning by not loading any package data. 
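[Editor's note — illustrative only, not part of this change.] A concrete instance of the new_mediators structure documented in plan_set_mediators above; the mediator name and values here are hypothetical.

# Set the implementation of the 'python' mediator and reset its version
# to the optimal default (None requests a reset, per the docstring).
new_mediators = {
    "python": {
        "implementation": "python3",
        "version": None,
    },
}
# A caller would then hand this dict to ImagePlan.plan_set_mediators().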
+ for k in ("implementation", "version"): + if self.pd._new_mediators[m].get(k) != cfg_mediators[m].get(k): + break + else: + if ( + self.pd._new_mediators[m].get("version-source") + != cfg_mediators[m].get("version-source") + ) or ( + self.pd._new_mediators[m].get("implementation-source") + != cfg_mediators[m].get("implementation-source") + ): + update_mediators[m] = self.pd._new_mediators[m] + del self.pd._new_mediators[m] + + if self.pd._new_mediators: + # Some mediations are changing, so merge the update only + # ones back in. + self.pd._new_mediators.update(update_mediators) + + # Determine which packages will be affected. + for f in self.image.gen_installed_pkgs(): + pt.plan_add_progress(pt.PLAN_MEDIATION_CHG) + m = self.image.get_manifest(f, ignore_excludes=True) + mediated = [] + for act in m.gen_actions_by_types( + ("hardlink", "link"), excludes=self.__new_excludes + ): + try: + mediator = act.attrs["mediator"] + except KeyError: + continue + if mediator in new_mediators: + mediated.append(act) + + if mediated: + pp = pkgplan.PkgPlan(self.image) + pp.propose_repair(f, m, mediated, misc.EmptyI) + pp.evaluate( + self.__new_excludes, + self.__new_excludes, + can_exclude=True, + ) + self.pd.pkg_plans.append(pp) + else: + # Only the source property is being updated for + # these mediators, so no packages needed loading. + self.pd._new_mediators = update_mediators + + pt.plan_done(pt.PLAN_MEDIATION_CHG) + self.__finish_plan(plandesc.EVALUATED_PKGS) + + def __any_reject_matches(self, reject_list): + """Check if any reject patterns match installed packages (in + which case a packaging operation should attempt to uninstall + those packages).""" + + # return true if any packages in reject list + # match any installed packages + return bool(reject_list) and bool( + self.match_user_stems( + self.image, + reject_list, + self.MATCH_INST_VERSIONS, + raise_not_installed=False, + ) + ) + + def plan_sync(self, li_pkg_updates=True, reject_list=misc.EmptyI): + """Determine the fmri changes needed to sync the image.""" + + self.__plan_op() + + # check if we need to uninstall any packages. + uninstall = self.__any_reject_matches(reject_list) + + # check if inherited facets are changing + new_facets = self.__evaluate_excludes()[1] + + # audits are fast, so do an audit to check if we're in sync. + insync = self.image.linked.insync() + + # if we're not trying to uninstall packages, and inherited + # facets are not changing, and we're already in sync, then + # don't bother invoking the solver. 
+ if not uninstall and not new_facets is not None and insync: + # we don't need to do anything + self.__finish_plan(plandesc.EVALUATED_PKGS, fmri_changes=[]) + return + + self.__plan_install( + li_pkg_updates=li_pkg_updates, + li_sync_op=True, + reject_list=reject_list, + ) + + def plan_uninstall(self, pkgs_to_uninstall, ignore_missing=False): + self.__plan_op() + proposed_dict, self.__match_rm = self.__match_user_fmris( + self.image, + pkgs_to_uninstall, + self.MATCH_INST_VERSIONS, + raise_not_installed=not ignore_missing, + ) + + # merge patterns together + proposed_removals = set( + [f for each in proposed_dict.values() for f in each] + ) + + # check if inherited facets are changing + new_facets = self.__evaluate_excludes()[1] + + # build installed dict + installed_dict = ImagePlan.__fmris2dict(self.image.gen_installed_pkgs()) + + def solver_cb(ignore_inst_parent_deps): + # instantiate solver + solver = pkg_solver.PkgSolver( + self.image.get_catalog(self.image.IMG_CATALOG_KNOWN), + installed_dict, + self.image.get_publisher_ranks(), + self.image.get_variants(), + self.image.avoid_set_get(), + self.image.linked.parent_fmris(), + self.__progtrack, + ) + + # check for triggered ops + self.__set_pkg_actuators( + pkgs_to_uninstall, pkgdefs.PKG_OP_UNINSTALL, solver + ) + + # run solver + new_vector, new_avoid_obs = solver.solve_uninstall( + self.image.get_frozen_list(), + proposed_removals, + self.__new_excludes, + ignore_inst_parent_deps=ignore_inst_parent_deps, + ) + + return solver, new_vector, new_avoid_obs + + # Solve; will raise exceptions if no solution is found. + solver, new_vector, self.pd._new_avoid_obs = self.__run_solver( + solver_cb + ) + + self.pd._fmri_changes = self.__vector_2_fmri_changes( + installed_dict, new_vector, new_facets=new_facets + ) + + self.__add_pkg_actuators_to_pd([x.pkg_name for x in proposed_removals]) + + self.pd._solver_summary = str(solver) + if DebugValues["plan"]: + self.pd._solver_errors = solver.get_trim_errors() + + self.__finish_plan(plandesc.EVALUATED_PKGS) + + def __plan_update_solver( + self, pkgs_update=None, ignore_missing=False, reject_list=misc.EmptyI + ): + """Use the solver to determine the fmri changes needed to + update the specified pkgs or all packages if none were + specified.""" + + # check if inherited facets are changing + new_facets = self.__evaluate_excludes()[1] + + # get ranking of publishers + pub_ranks = self.image.get_publisher_ranks() + + # build installed dict + installed_dict = ImagePlan.__fmris2dict(self.image.gen_installed_pkgs()) + + # If specific packages or patterns were provided, then + # determine the proposed set to pass to the solver. + if reject_list: + reject_set = self.match_user_stems( + self.image, reject_list, self.MATCH_ALL + ) + else: + reject_set = set() + + if pkgs_update: + update_dict, references = self.__match_user_fmris( + self.image, + pkgs_update, + self.MATCH_INST_STEMS, + pub_ranks=pub_ranks, + installed_pkgs=installed_dict, + raise_not_installed=not ignore_missing, + reject_set=reject_set, + ) + self.__match_update = references + + def solver_cb(ignore_inst_parent_deps): + # instantiate solver + solver = pkg_solver.PkgSolver( + self.image.get_catalog(self.image.IMG_CATALOG_KNOWN), + installed_dict, + pub_ranks, + self.image.get_variants(), + self.image.avoid_set_get(), + self.image.linked.parent_fmris(), + self.__progtrack, + ) + + if reject_list: + # use reject_list, not reject_set, to preserve + # input intent (e.g. 'pkg:/', '/' prefixes). 
+ self.__set_pkg_actuators( + reject_list, pkgdefs.PKG_OP_UNINSTALL, solver + ) - # Determine if the only thing changing for mediations is - # whether configuration source is changing. If so, - # optimize planning by not loading any package data. - for k in ("implementation", "version"): - if self.pd._new_mediators[m].get(k) != \ - cfg_mediators[m].get(k): - break - else: - if (self.pd._new_mediators[m].get("version-source") != \ - cfg_mediators[m].get("version-source")) or \ - (self.pd._new_mediators[m].get("implementation-source") != \ - cfg_mediators[m].get("implementation-source")): - update_mediators[m] = \ - self.pd._new_mediators[m] - del self.pd._new_mediators[m] - - if self.pd._new_mediators: - # Some mediations are changing, so merge the update only - # ones back in. - self.pd._new_mediators.update(update_mediators) - - # Determine which packages will be affected. - for f in self.image.gen_installed_pkgs(): - pt.plan_add_progress(pt.PLAN_MEDIATION_CHG) - m = self.image.get_manifest(f, - ignore_excludes=True) - mediated = [] - for act in m.gen_actions_by_types(("hardlink", - "link"), excludes=self.__new_excludes): - try: - mediator = act.attrs["mediator"] - except KeyError: - continue - if mediator in new_mediators: - mediated.append(act) - - if mediated: - pp = pkgplan.PkgPlan(self.image) - pp.propose_repair(f, m, mediated, - misc.EmptyI) - pp.evaluate(self.__new_excludes, - self.__new_excludes, - can_exclude=True) - self.pd.pkg_plans.append(pp) - else: - # Only the source property is being updated for - # these mediators, so no packages needed loading. - self.pd._new_mediators = update_mediators - - pt.plan_done(pt.PLAN_MEDIATION_CHG) - self.__finish_plan(plandesc.EVALUATED_PKGS) - - def __any_reject_matches(self, reject_list): - """Check if any reject patterns match installed packages (in - which case a packaging operation should attempt to uninstall - those packages).""" - - # return true if any packages in reject list - # match any installed packages - return bool(reject_list) and \ - bool(self.match_user_stems(self.image, reject_list, - self.MATCH_INST_VERSIONS, raise_not_installed=False)) - - def plan_sync(self, li_pkg_updates=True, reject_list=misc.EmptyI): - """Determine the fmri changes needed to sync the image.""" - - self.__plan_op() - - # check if we need to uninstall any packages. - uninstall = self.__any_reject_matches(reject_list) - - # check if inherited facets are changing - new_facets = self.__evaluate_excludes()[1] - - # audits are fast, so do an audit to check if we're in sync. - insync = self.image.linked.insync() - - # if we're not trying to uninstall packages, and inherited - # facets are not changing, and we're already in sync, then - # don't bother invoking the solver. 
- if not uninstall and not new_facets is not None and insync: - # we don't need to do anything - self.__finish_plan(plandesc.EVALUATED_PKGS, - fmri_changes=[]) - return - - self.__plan_install(li_pkg_updates=li_pkg_updates, - li_sync_op=True, reject_list=reject_list) - - def plan_uninstall(self, pkgs_to_uninstall, ignore_missing=False): - self.__plan_op() - proposed_dict, self.__match_rm = self.__match_user_fmris( - self.image, pkgs_to_uninstall, self.MATCH_INST_VERSIONS, - raise_not_installed=not ignore_missing) - - # merge patterns together - proposed_removals = set([ - f - for each in proposed_dict.values() - for f in each - ]) - - # check if inherited facets are changing - new_facets = self.__evaluate_excludes()[1] - - # build installed dict - installed_dict = ImagePlan.__fmris2dict( - self.image.gen_installed_pkgs()) - - def solver_cb(ignore_inst_parent_deps): - # instantiate solver - solver = pkg_solver.PkgSolver( - self.image.get_catalog( - self.image.IMG_CATALOG_KNOWN), - installed_dict, - self.image.get_publisher_ranks(), - self.image.get_variants(), - self.image.avoid_set_get(), - self.image.linked.parent_fmris(), - self.__progtrack) - - # check for triggered ops - self.__set_pkg_actuators(pkgs_to_uninstall, - pkgdefs.PKG_OP_UNINSTALL, solver) - - # run solver - new_vector, new_avoid_obs = \ - solver.solve_uninstall( - self.image.get_frozen_list(), - proposed_removals, - self.__new_excludes, - ignore_inst_parent_deps=\ - ignore_inst_parent_deps) - - return solver, new_vector, new_avoid_obs - - # Solve; will raise exceptions if no solution is found. - solver, new_vector, self.pd._new_avoid_obs = \ - self.__run_solver(solver_cb) - - self.pd._fmri_changes = self.__vector_2_fmri_changes( - installed_dict, new_vector, - new_facets=new_facets) - - self.__add_pkg_actuators_to_pd( - [x.pkg_name for x in proposed_removals]) - - self.pd._solver_summary = str(solver) - if DebugValues["plan"]: - self.pd._solver_errors = solver.get_trim_errors() - - self.__finish_plan(plandesc.EVALUATED_PKGS) - - def __plan_update_solver(self, pkgs_update=None, - ignore_missing=False, reject_list=misc.EmptyI): - """Use the solver to determine the fmri changes needed to - update the specified pkgs or all packages if none were - specified.""" - - # check if inherited facets are changing - new_facets = self.__evaluate_excludes()[1] - - # get ranking of publishers - pub_ranks = self.image.get_publisher_ranks() - - # build installed dict - installed_dict = ImagePlan.__fmris2dict( - self.image.gen_installed_pkgs()) - - # If specific packages or patterns were provided, then - # determine the proposed set to pass to the solver. - if reject_list: - reject_set = self.match_user_stems(self.image, - reject_list, self.MATCH_ALL) - else: - reject_set = set() - - if pkgs_update: - update_dict, references = self.__match_user_fmris( - self.image, pkgs_update, self.MATCH_INST_STEMS, - pub_ranks=pub_ranks, installed_pkgs=installed_dict, - raise_not_installed=not ignore_missing, - reject_set=reject_set) - self.__match_update = references - - def solver_cb(ignore_inst_parent_deps): - # instantiate solver - solver = pkg_solver.PkgSolver( - self.image.get_catalog( - self.image.IMG_CATALOG_KNOWN), - installed_dict, - pub_ranks, - self.image.get_variants(), - self.image.avoid_set_get(), - self.image.linked.parent_fmris(), - self.__progtrack) - - if reject_list: - # use reject_list, not reject_set, to preserve - # input intent (e.g. 'pkg:/', '/' prefixes). 
- self.__set_pkg_actuators(reject_list, - pkgdefs.PKG_OP_UNINSTALL, solver) - - # run solver - if pkgs_update: - new_vector, new_avoid_obs = \ - solver.solve_install( - self.image.get_frozen_list(), - update_dict, - excludes=self.__new_excludes, - reject_set=reject_set, - trim_proposed_installed=False, - ignore_inst_parent_deps=\ - ignore_inst_parent_deps) - else: - # Updating all installed packages requires a - # different solution path. - new_vector, new_avoid_obs = \ - solver.solve_update_all( - self.image.get_frozen_list(), - excludes=self.__new_excludes, - reject_set=reject_set) - - return solver, new_vector, new_avoid_obs - - # We can't retry this operation while ignoring parent - # dependencies if we're doing a unconstrained update. - retry_wo_parent_deps = bool(pkgs_update) - - # Solve; will raise exceptions if no solution is found. - solver, new_vector, self.pd._new_avoid_obs = \ - self.__run_solver(solver_cb, \ - retry_wo_parent_deps=retry_wo_parent_deps) - - self.pd._fmri_changes = self.__vector_2_fmri_changes( - installed_dict, new_vector, - new_facets=new_facets) - - self.__add_pkg_actuators_to_pd(reject_set) - - self.pd._solver_summary = str(solver) - if DebugValues["plan"]: - self.pd._solver_errors = solver.get_trim_errors() - - def plan_update(self, pkgs_update=None, - ignore_missing=False, reject_list=misc.EmptyI): - """Determine the fmri changes needed to update the specified - pkgs or all packages if none were specified.""" - - self.__plan_op() - self.__plan_update_solver( - ignore_missing=ignore_missing, - pkgs_update=pkgs_update, - reject_list=reject_list) - self.__finish_plan(plandesc.EVALUATED_PKGS) - - def plan_revert(self, args, tagged): - """Plan reverting the specified files or files tagged as - specified. We create the pkgplans here rather than in - evaluate; by keeping the list of changed_fmris empty we - skip most of the processing in evaluate. - We also process revert tags on directories here""" - - self.__plan_op() - self.__evaluate_excludes() - - revert_dict = defaultdict(list) - revert_dirs = defaultdict(list) - - pt = self.__progtrack - pt.plan_all_start() - - # since the fmri list stays empty, we can set this; - # we need this set so we can build directories and - # actions lists as we're doing checking with installed - # actions earlier here. - self.pd.state = plandesc.EVALUATED_PKGS - - # We could have a specific 'revert' tracker item, but - # "package planning" seems as good a term as any. - pt.plan_start(pt.PLAN_PKGPLAN, - goal=self.image.count_installed_pkgs()) - if tagged: - # look through all the files on the system; any files - # tagged w/ revert-tag set to any of the values on - # the command line need to be checked and reverted if - # they differ from the manifests. Note we don't care - # if the file is editable or not. - - # look through directories to see if any have our - # revert-tag set; we then need to check the value - # to find any unpackaged files that need deletion. 
- tag_set = set(args) - for f in self.image.gen_installed_pkgs(): - pt.plan_add_progress(pt.PLAN_PKGPLAN) - m = self.image.get_manifest(f, - ignore_excludes=True) - for act in m.gen_actions_by_type("file", - excludes=self.__new_excludes): - if "revert-tag" in act.attrs and \ - (set(act.attrlist("revert-tag")) & - tag_set): - revert_dict[(f, m)].append(act) - - for act in m.gen_actions_by_type("dir", - excludes=self.__new_excludes): - if "revert-tag" not in act.attrs: - continue - for a in act.attrlist("revert-tag"): - tag_parts = a.split("=", 2) - if tag_parts[0] not in tag_set or \ - len(tag_parts) != 2: - continue - revert_dirs[(f, m)].append( - self.__gen_matching_acts( - act.attrs["path"], - tag_parts[1])) - else: - # look through all the packages, looking for our files - # we could use search for this. We don't support reverting - # directories by ad-hoc means. - - revertpaths = set([a.lstrip(os.path.sep) for a in args]) - overlaypaths = set() - for f in self.image.gen_installed_pkgs(): - pt.plan_add_progress(pt.PLAN_PKGPLAN) - m = self.image.get_manifest(f, - ignore_excludes=True) - for act in m.gen_actions_by_type("file", - excludes=self.__new_excludes): - path = act.attrs["path"] - if path in revertpaths or \ - path in overlaypaths: - revert_dict[(f, m)].append(act) - if act.attrs.get("overlay") == \ - "allow": - # Action allows overlay, - # all matching actions - # must be collected. - # The imageplan will - # automatically handle - # the overlaid action - # if an overlaying - # action is present. - overlaypaths.add(path) - revertpaths.discard(path) - - revertpaths.difference_update(overlaypaths) - if revertpaths: - pt.plan_done(pt.PLAN_PKGPLAN) - pt.plan_all_done() - raise api_errors.PlanCreationException( - nofiles=list(revertpaths)) - - for f, m in revert_dict.keys(): - # build list of actions that will need to be reverted - # no sense in replacing files that are original already - needs_change = [] - pt.plan_add_progress(pt.PLAN_PKGPLAN, nitems=0) - for act in revert_dict[(f, m)]: - # delete preserve and preserve-version - # attributes to both find and enable - # replacement of modified editable files. - # Note: preserve=abandon should be skipped - # because, by definition, these files should - # not be restored (they have been abandoned) - if act.attrs.get("preserve") == "abandon": - continue - - act.attrs.pop("preserve", None) - act.attrs.pop("preserve-version", None) - act.verify(self.image, forever=True) - if act.replace_required == True: - needs_change.append(act) - - revert_dict[(f, m)] = needs_change - - for f, m in revert_dirs: - needs_delete = [] - for unchecked, checked in revert_dirs[(f, m)]: - # just add these... 
- needs_delete.extend(checked) - # look for these - for un in unchecked: - path = un.attrs["path"] - if path not in self.get_actions("file") \ - and path not in self.get_actions("hardlink") \ - and path not in self.get_actions("link"): - needs_delete.append(un) - revert_dirs[(f, m)] = needs_delete - - # build the pkg plans, making sure to propose only one repair - # per fmri - for f, m in set(list(revert_dirs.keys()) + list(revert_dict.keys())): - needs_delete = revert_dirs[(f, m)] - needs_change = revert_dict[(f, m)] - if not needs_delete and not needs_change: - continue + # run solver + if pkgs_update: + new_vector, new_avoid_obs = solver.solve_install( + self.image.get_frozen_list(), + update_dict, + excludes=self.__new_excludes, + reject_set=reject_set, + trim_proposed_installed=False, + ignore_inst_parent_deps=ignore_inst_parent_deps, + ) + else: + # Updating all installed packages requires a + # different solution path. + new_vector, new_avoid_obs = solver.solve_update_all( + self.image.get_frozen_list(), + excludes=self.__new_excludes, + reject_set=reject_set, + ) - pp = pkgplan.PkgPlan(self.image) - pp.propose_repair(f, m, needs_change, needs_delete) - pp.evaluate(self.__new_excludes, self.__new_excludes, - can_exclude=True) - self.pd.pkg_plans.append(pp) + return solver, new_vector, new_avoid_obs + + # We can't retry this operation while ignoring parent + # dependencies if we're doing a unconstrained update. + retry_wo_parent_deps = bool(pkgs_update) + + # Solve; will raise exceptions if no solution is found. + solver, new_vector, self.pd._new_avoid_obs = self.__run_solver( + solver_cb, retry_wo_parent_deps=retry_wo_parent_deps + ) + + self.pd._fmri_changes = self.__vector_2_fmri_changes( + installed_dict, new_vector, new_facets=new_facets + ) + + self.__add_pkg_actuators_to_pd(reject_set) + + self.pd._solver_summary = str(solver) + if DebugValues["plan"]: + self.pd._solver_errors = solver.get_trim_errors() + + def plan_update( + self, pkgs_update=None, ignore_missing=False, reject_list=misc.EmptyI + ): + """Determine the fmri changes needed to update the specified + pkgs or all packages if none were specified.""" + + self.__plan_op() + self.__plan_update_solver( + ignore_missing=ignore_missing, + pkgs_update=pkgs_update, + reject_list=reject_list, + ) + self.__finish_plan(plandesc.EVALUATED_PKGS) + + def plan_revert(self, args, tagged): + """Plan reverting the specified files or files tagged as + specified. We create the pkgplans here rather than in + evaluate; by keeping the list of changed_fmris empty we + skip most of the processing in evaluate. + We also process revert tags on directories here""" + + self.__plan_op() + self.__evaluate_excludes() + + revert_dict = defaultdict(list) + revert_dirs = defaultdict(list) + + pt = self.__progtrack + pt.plan_all_start() + + # since the fmri list stays empty, we can set this; + # we need this set so we can build directories and + # actions lists as we're doing checking with installed + # actions earlier here. + self.pd.state = plandesc.EVALUATED_PKGS + + # We could have a specific 'revert' tracker item, but + # "package planning" seems as good a term as any. + pt.plan_start(pt.PLAN_PKGPLAN, goal=self.image.count_installed_pkgs()) + if tagged: + # look through all the files on the system; any files + # tagged w/ revert-tag set to any of the values on + # the command line need to be checked and reverted if + # they differ from the manifests. Note we don't care + # if the file is editable or not. 
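# [Editor's note - illustrative only, not part of this change.] A file
# action selected by this branch might carry an attribute such as
#   file path=etc/motd ... revert-tag=standard-config
# and would be matched when the corresponding tag is supplied on the
# command line (for example via 'pkg revert --tagged standard-config');
# the tag name here is hypothetical.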
+ + # look through directories to see if any have our + # revert-tag set; we then need to check the value + # to find any unpackaged files that need deletion. + tag_set = set(args) + for f in self.image.gen_installed_pkgs(): + pt.plan_add_progress(pt.PLAN_PKGPLAN) + m = self.image.get_manifest(f, ignore_excludes=True) + for act in m.gen_actions_by_type( + "file", excludes=self.__new_excludes + ): + if "revert-tag" in act.attrs and ( + set(act.attrlist("revert-tag")) & tag_set + ): + revert_dict[(f, m)].append(act) + for act in m.gen_actions_by_type( + "dir", excludes=self.__new_excludes + ): + if "revert-tag" not in act.attrs: + continue + for a in act.attrlist("revert-tag"): + tag_parts = a.split("=", 2) + if tag_parts[0] not in tag_set or len(tag_parts) != 2: + continue + revert_dirs[(f, m)].append( + self.__gen_matching_acts( + act.attrs["path"], tag_parts[1] + ) + ) + else: + # look through all the packages, looking for our files + # we could use search for this. We don't support reverting + # directories by ad-hoc means. + + revertpaths = set([a.lstrip(os.path.sep) for a in args]) + overlaypaths = set() + for f in self.image.gen_installed_pkgs(): + pt.plan_add_progress(pt.PLAN_PKGPLAN) + m = self.image.get_manifest(f, ignore_excludes=True) + for act in m.gen_actions_by_type( + "file", excludes=self.__new_excludes + ): + path = act.attrs["path"] + if path in revertpaths or path in overlaypaths: + revert_dict[(f, m)].append(act) + if act.attrs.get("overlay") == "allow": + # Action allows overlay, + # all matching actions + # must be collected. + # The imageplan will + # automatically handle + # the overlaid action + # if an overlaying + # action is present. + overlaypaths.add(path) + revertpaths.discard(path) + + revertpaths.difference_update(overlaypaths) + if revertpaths: pt.plan_done(pt.PLAN_PKGPLAN) pt.plan_all_done() - self.__finish_plan(plandesc.EVALUATED_PKGS, fmri_changes=[]) - - def __gen_matching_acts(self, path, pattern): - # return two lists of actions that match pattern at path - # include (recursively) directories only if they are not - # implicitly or explicitly packaged. First list may - # contain packaged objects, second does not. - - if path == os.path.sep: # not doing root - return [], [] - - dir_loc = os.path.join(self.image.root, path) - - # If this is a mount point, disable this; too easy - # to break things. This means this doesn't work - # on /var and /tmp - that's ok. - - try: - # if dir is missing, nothing to delete :) - my_dev = os.stat(dir_loc).st_dev - except OSError: - return [], [] - - # disallow mount points for safety's sake. - if my_dev != os.stat(os.path.dirname(dir_loc)).st_dev: - return [], [] - - # Any explicit or implicitly packaged directories are - # ignored; checking all directory entries is cheap. - paths = [ - os.path.join(dir_loc, a) - for a in fnmatch.filter(os.listdir(dir_loc), pattern) - if os.path.join(path, a) not in self.__get_directories() - ] - - # now we have list of items to be removed. We know that - # any directories are not packaged, so expand those here - # and generate actions - unchecked = [] - checked = [] - - for path in paths: - if os.path.isdir(path) and not os.path.islink(path): - # we have a directory that needs expanding; - # add it in and then walk the contents - checked.append(self.__gen_del_act(path)) - for dirpath, dirnames, filenames in os.walk(path): - # crossed mountpoints - don't go here. 
- if os.stat(dirpath).st_dev != my_dev: - continue - for name in dirnames + filenames: - checked.append( - self.__gen_del_act( - os.path.join(dirpath, name))) - else: - unchecked.append(self.__gen_del_act(path)) - return unchecked, checked - - def __gen_del_act(self, path): - # With fully qualified path, return action suitable for - # deletion from image. Don't bother getting owner - # and group right; we're just going to blow it up anyway. - - rootdir = self.image.root - pubpath = pkg.misc.relpath(path, rootdir) - pstat = os.lstat(path) - mode = oct(stat.S_IMODE(pstat.st_mode)) - if stat.S_ISLNK(pstat.st_mode): - return pkg.actions.link.LinkAction(None, - target=os.readlink(path), path=pubpath) - elif stat.S_ISDIR(pstat.st_mode): - return pkg.actions.directory.DirectoryAction(None, - mode=mode, owner="root", - group="bin", path=pubpath) - else: # treat everything else as a file - return pkg.actions.file.FileAction(None, - mode=mode, owner="root", - group="bin", path=pubpath) - - def __get_overlaying(self, img, act, pfmri): - """Given an action with attribute overlay=allow, if there is an - overlaying action installed in the image, return the overlaying - package's FMRI and the action.""" - - for f in img.gen_installed_pkgs(): - if f == pfmri: - # Not interested in ourselves. - continue - m = img.get_manifest(f) - matching = list(m.gen_actions_by_types([act.name], - {"path": [act.attrs["path"]]})) - if matching: - # Only one action can overlay another, so we - # know we've found a match at this point. - return f, matching[0] - return None, None - - def __process_verify_result(self, act, pfmri, pt, - verifypaths=None, overlaypaths=None, ovlying_fmri=None, - ovlying_act=None, skip_ovlying=False, ovly_entries=None): - """Process delayed actions.""" - if not ovlying_act and not skip_ovlying: - # Find the overlaying fmri/action. - ovlying_fmri, ovlying_act = self.__get_overlaying( - self.image, act, pfmri) - # If overlaying action is found, we use the overlaying action - # for verification. - if ovlying_act: - # Update overlaying entries with the newly found - # overlaying action. - if ovly_entries: - ovlying_path = ovlying_act.attrs.get("path") - if ovlying_path in ovly_entries: - ovly_entries[ovlying_path].append( - (ovlying_fmri, ovlying_act, - "overlaying")) - for oing_act, errors, warnings, pinfo, is_overlaid in \ - self.image.verify(pfmri, pt, - verifypaths=verifypaths, overlaypaths=overlaypaths, - single_act=ovlying_act, verbose=True, - forever=True): - return oing_act, errors, warnings, pinfo, \ - ovlying_fmri - else: - for olaid_act, errors, warnings, pinfo, is_overlaid \ - in self.image.verify(pfmri, pt, - verifypaths=verifypaths, overlaypaths=overlaypaths, - single_act=act, verbose=True, forever=True): - return olaid_act, errors, warnings, pinfo, \ - None - return act, [], [], [], None - - def __is_active_liveroot_be(self, img): - """Check if an image is in an active live be.""" - - if not img.is_liveroot(): - return False, None - - try: - be_name, be_uuid = bootenv.BootEnv.get_be_name( - img.root) - return True, be_name - except api_errors.BEException: - # If boot environment logic isn't supported, return - # False. This is necessary for user images and for - # the test suite. 
- return False, None - - def __alt_image_root_with_new_be(self, dup_be_name, orig_img_root): - img_root = orig_img_root - mntpoint = None - if not dup_be_name: - dup_be_name = "duplicate_livebe_for_verify" - isalbe, src_be_name = self.__is_active_liveroot_be(self.image) - if not isalbe: - return img_root, mntpoint + raise api_errors.PlanCreationException( + nofiles=list(revertpaths) + ) + for f, m in revert_dict.keys(): + # build list of actions that will need to be reverted + # no sense in replacing files that are original already + needs_change = [] + pt.plan_add_progress(pt.PLAN_PKGPLAN, nitems=0) + for act in revert_dict[(f, m)]: + # delete preserve and preserve-version + # attributes to both find and enable + # replacement of modified editable files. + # Note: preserve=abandon should be skipped + # because, by definition, these files should + # not be restored (they have been abandoned) + if act.attrs.get("preserve") == "abandon": + continue + + act.attrs.pop("preserve", None) + act.attrs.pop("preserve-version", None) + act.verify(self.image, forever=True) + if act.replace_required == True: + needs_change.append(act) + + revert_dict[(f, m)] = needs_change + + for f, m in revert_dirs: + needs_delete = [] + for unchecked, checked in revert_dirs[(f, m)]: + # just add these... + needs_delete.extend(checked) + # look for these + for un in unchecked: + path = un.attrs["path"] + if ( + path not in self.get_actions("file") + and path not in self.get_actions("hardlink") + and path not in self.get_actions("link") + ): + needs_delete.append(un) + revert_dirs[(f, m)] = needs_delete + + # build the pkg plans, making sure to propose only one repair + # per fmri + for f, m in set(list(revert_dirs.keys()) + list(revert_dict.keys())): + needs_delete = revert_dirs[(f, m)] + needs_change = revert_dict[(f, m)] + if not needs_delete and not needs_change: + continue + + pp = pkgplan.PkgPlan(self.image) + pp.propose_repair(f, m, needs_change, needs_delete) + pp.evaluate( + self.__new_excludes, self.__new_excludes, can_exclude=True + ) + self.pd.pkg_plans.append(pp) + + pt.plan_done(pt.PLAN_PKGPLAN) + pt.plan_all_done() + self.__finish_plan(plandesc.EVALUATED_PKGS, fmri_changes=[]) + + def __gen_matching_acts(self, path, pattern): + # return two lists of actions that match pattern at path + # include (recursively) directories only if they are not + # implicitly or explicitly packaged. First list may + # contain packaged objects, second does not. + + if path == os.path.sep: # not doing root + return [], [] + + dir_loc = os.path.join(self.image.root, path) + + # If this is a mount point, disable this; too easy + # to break things. This means this doesn't work + # on /var and /tmp - that's ok. + + try: + # if dir is missing, nothing to delete :) + my_dev = os.stat(dir_loc).st_dev + except OSError: + return [], [] + + # disallow mount points for safety's sake. + if my_dev != os.stat(os.path.dirname(dir_loc)).st_dev: + return [], [] + + # Any explicit or implicitly packaged directories are + # ignored; checking all directory entries is cheap. + paths = [ + os.path.join(dir_loc, a) + for a in fnmatch.filter(os.listdir(dir_loc), pattern) + if os.path.join(path, a) not in self.__get_directories() + ] + + # now we have list of items to be removed. 
We know that + # any directories are not packaged, so expand those here + # and generate actions + unchecked = [] + checked = [] + + for path in paths: + if os.path.isdir(path) and not os.path.islink(path): + # we have a directory that needs expanding; + # add it in and then walk the contents + checked.append(self.__gen_del_act(path)) + for dirpath, dirnames, filenames in os.walk(path): + # crossed mountpoints - don't go here. + if os.stat(dirpath).st_dev != my_dev: + continue + for name in dirnames + filenames: + checked.append( + self.__gen_del_act(os.path.join(dirpath, name)) + ) + else: + unchecked.append(self.__gen_del_act(path)) + return unchecked, checked + + def __gen_del_act(self, path): + # With fully qualified path, return action suitable for + # deletion from image. Don't bother getting owner + # and group right; we're just going to blow it up anyway. + + rootdir = self.image.root + pubpath = pkg.misc.relpath(path, rootdir) + pstat = os.lstat(path) + mode = oct(stat.S_IMODE(pstat.st_mode)) + if stat.S_ISLNK(pstat.st_mode): + return pkg.actions.link.LinkAction( + None, target=os.readlink(path), path=pubpath + ) + elif stat.S_ISDIR(pstat.st_mode): + return pkg.actions.directory.DirectoryAction( + None, mode=mode, owner="root", group="bin", path=pubpath + ) + else: # treat everything else as a file + return pkg.actions.file.FileAction( + None, mode=mode, owner="root", group="bin", path=pubpath + ) + + def __get_overlaying(self, img, act, pfmri): + """Given an action with attribute overlay=allow, if there is an + overlaying action installed in the image, return the overlaying + package's FMRI and the action.""" + + for f in img.gen_installed_pkgs(): + if f == pfmri: + # Not interested in ourselves. + continue + m = img.get_manifest(f) + matching = list( + m.gen_actions_by_types( + [act.name], {"path": [act.attrs["path"]]} + ) + ) + if matching: + # Only one action can overlay another, so we + # know we've found a match at this point. + return f, matching[0] + return None, None + + def __process_verify_result( + self, + act, + pfmri, + pt, + verifypaths=None, + overlaypaths=None, + ovlying_fmri=None, + ovlying_act=None, + skip_ovlying=False, + ovly_entries=None, + ): + """Process delayed actions.""" + if not ovlying_act and not skip_ovlying: + # Find the overlaying fmri/action. + ovlying_fmri, ovlying_act = self.__get_overlaying( + self.image, act, pfmri + ) + # If overlaying action is found, we use the overlaying action + # for verification. + if ovlying_act: + # Update overlaying entries with the newly found + # overlaying action. 
+ if ovly_entries: + ovlying_path = ovlying_act.attrs.get("path") + if ovlying_path in ovly_entries: + ovly_entries[ovlying_path].append( + (ovlying_fmri, ovlying_act, "overlaying") + ) + for ( + oing_act, + errors, + warnings, + pinfo, + is_overlaid, + ) in self.image.verify( + pfmri, + pt, + verifypaths=verifypaths, + overlaypaths=overlaypaths, + single_act=ovlying_act, + verbose=True, + forever=True, + ): + return oing_act, errors, warnings, pinfo, ovlying_fmri + else: + for ( + olaid_act, + errors, + warnings, + pinfo, + is_overlaid, + ) in self.image.verify( + pfmri, + pt, + verifypaths=verifypaths, + overlaypaths=overlaypaths, + single_act=act, + verbose=True, + forever=True, + ): + return olaid_act, errors, warnings, pinfo, None + return act, [], [], [], None + + def __is_active_liveroot_be(self, img): + """Check if an image is in an active live be.""" + + if not img.is_liveroot(): + return False, None + + try: + be_name, be_uuid = bootenv.BootEnv.get_be_name(img.root) + return True, be_name + except api_errors.BEException: + # If boot environment logic isn't supported, return + # False. This is necessary for user images and for + # the test suite. + return False, None + + def __alt_image_root_with_new_be(self, dup_be_name, orig_img_root): + img_root = orig_img_root + mntpoint = None + if not dup_be_name: + dup_be_name = "duplicate_livebe_for_verify" + isalbe, src_be_name = self.__is_active_liveroot_be(self.image) + if not isalbe: + return img_root, mntpoint + + try: + bootenv.BootEnv.cleanup_be(dup_be_name) + temp_root = misc.config_temp_root() + mntpoint = tempfile.mkdtemp( + dir=temp_root, prefix="pkg-verify" + "-" + ) + bootenv.BootEnv.copy_be(src_be_name, dup_be_name) + bootenv.BootEnv.mount_be(dup_be_name, mntpoint) + img_root = mntpoint + except Exception as e: + did_create = dup_be_name in bootenv.BootEnv.get_be_names() + warn = _( + "Cannot create or mount a copy of current be. " + "Reporting unpackaged content aganist current live " + "image." + ) + fallback = False + timestamp = misc.time_to_timestamp(time.time()) + if did_create: try: - bootenv.BootEnv.cleanup_be(dup_be_name) - temp_root = misc.config_temp_root() - mntpoint = tempfile.mkdtemp(dir=temp_root, - prefix="pkg-verify" + "-") - bootenv.BootEnv.copy_be(src_be_name, dup_be_name) + if img_root != mntpoint: bootenv.BootEnv.mount_be(dup_be_name, mntpoint) img_root = mntpoint except Exception as e: - did_create = dup_be_name in \ - bootenv.BootEnv.get_be_names() - warn = _("Cannot create or mount a copy of current be. " - "Reporting unpackaged content aganist current live " - "image.") - fallback = False - timestamp = misc.time_to_timestamp(time.time()) - if did_create: - try: - if img_root != mntpoint: - bootenv.BootEnv.mount_be( - dup_be_name, mntpoint) - img_root = mntpoint - except Exception as e: - # Cannot mount be, fallback. - fallback = True - else: - # Cannot create be, fallback. 
- fallback = True - - if fallback: - shutil.rmtree(mntpoint, ignore_errors=True) - self.pd.add_item_message("warnings", timestamp, - MSG_WARNING, warn, msg_type=MSG_UNPACKAGED, - parent="unpackaged") - return img_root, mntpoint - - def __process_unpackaged(self, proposed_fmris, pt=None, - dup_be_name="duplicate_livebe_for_verify"): - allentries = {} - img_root = self.image.get_root() - - for fmri in proposed_fmris: - m = self.image.get_manifest(fmri) - for act in m.gen_actions(): - if act.name in ["link", "hardlink", "dir", - "file"]: - install_path = os.path.normpath( - os.path.join(img_root, - act.attrs["path"])) - allentries[install_path] = act.name - # Process possible implicit directories. - for d in m.get_directories(()): - install_path = os.path.normpath( - os.path.join(img_root, d)) - if install_path not in allentries: - allentries[install_path] = "dir" - if pt: - pt.plan_add_progress(pt.PLAN_PKG_VERIFY) - - def handle_walk_error(oserror_inst): - timestamp = misc.time_to_timestamp(time.time()) - self.pd.add_item_message("errors", timestamp, - MSG_ERROR, str(oserror_inst), - msg_type=MSG_UNPACKAGED, parent="unpackaged") - - orig_img_root = img_root - img_root, mntpoint = self.__alt_image_root_with_new_be( - dup_be_name, orig_img_root) - - # Walk through file system structure. - for root, dirs, files in os.walk(img_root, - onerror=handle_walk_error): - newdirs = [] - # Since we possibly changed the img_root into the - # mounted be root, we need to change it back for look - # up. - orig_root = os.path.normpath(os.path.join( - orig_img_root, os.path.relpath(root, img_root))) - for d in sorted(dirs): - timestamp = misc.time_to_timestamp(time.time()) - path = os.path.normpath(os.path.join( - orig_root, d)) - # Since the mntpoint is created solely for - # verify purpose, ignore it. - if mntpoint and path == mntpoint: - continue - if path not in allentries or allentries[path] \ - not in ["dir", "link"]: - self.pd.add_item_message( - _("dir: {0}").format(path), - timestamp, - MSG_INFO, - _("Unpackaged directory"), - msg_type=MSG_UNPACKAGED, - parent="unpackaged") - else: - newdirs.append(d) - dirs[:] = newdirs - - for f in sorted(files): - timestamp = misc.time_to_timestamp(time.time()) - path = os.path.normpath(os.path.join( - orig_root, f)) - if path not in allentries or allentries[path] \ - not in ["file", "link", "hardlink"]: - self.pd.add_item_message( - _("file: {0}").format(path), - timestamp, MSG_INFO, - _("Unpackaged file"), - msg_type=MSG_UNPACKAGED, - parent="unpackaged") - - def __process_msgs(self, entries, pfmri, msg_level, result, needs_fix, - repairs): - """Generate plan message for verify result.""" - - timestamp = misc.time_to_timestamp(time.time()) - ffmri = str(pfmri) - self.pd.add_item_message(ffmri, timestamp, - msg_level, _("{pkg_name:70} {result:>7}").format( - pkg_name=pfmri.get_pkg_stem(), - result=result)) + # Cannot mount be, fallback. + fallback = True + else: + # Cannot create be, fallback. 
+ fallback = True + + if fallback: + shutil.rmtree(mntpoint, ignore_errors=True) + self.pd.add_item_message( + "warnings", + timestamp, + MSG_WARNING, + warn, + msg_type=MSG_UNPACKAGED, + parent="unpackaged", + ) + return img_root, mntpoint + + def __process_unpackaged( + self, proposed_fmris, pt=None, dup_be_name="duplicate_livebe_for_verify" + ): + allentries = {} + img_root = self.image.get_root() + + for fmri in proposed_fmris: + m = self.image.get_manifest(fmri) + for act in m.gen_actions(): + if act.name in ["link", "hardlink", "dir", "file"]: + install_path = os.path.normpath( + os.path.join(img_root, act.attrs["path"]) + ) + allentries[install_path] = act.name + # Process possible implicit directories. + for d in m.get_directories(()): + install_path = os.path.normpath(os.path.join(img_root, d)) + if install_path not in allentries: + allentries[install_path] = "dir" + if pt: + pt.plan_add_progress(pt.PLAN_PKG_VERIFY) + + def handle_walk_error(oserror_inst): + timestamp = misc.time_to_timestamp(time.time()) + self.pd.add_item_message( + "errors", + timestamp, + MSG_ERROR, + str(oserror_inst), + msg_type=MSG_UNPACKAGED, + parent="unpackaged", + ) + + orig_img_root = img_root + img_root, mntpoint = self.__alt_image_root_with_new_be( + dup_be_name, orig_img_root + ) + + # Walk through file system structure. + for root, dirs, files in os.walk(img_root, onerror=handle_walk_error): + newdirs = [] + # Since we possibly changed the img_root into the + # mounted be root, we need to change it back for look + # up. + orig_root = os.path.normpath( + os.path.join(orig_img_root, os.path.relpath(root, img_root)) + ) + for d in sorted(dirs): timestamp = misc.time_to_timestamp(time.time()) - - for act, errors, warnings, info, oing_fmri in entries: - if act: - item_id = act.distinguished_name() - if oing_fmri: - item_id += ' (from {0})'.format( - oing_fmri.get_pkg_stem(anarchy=True)) - parent = ffmri - else: - item_id = ffmri - parent = None - for x in errors: - self.pd.add_item_message(item_id, - timestamp, MSG_ERROR, - _("ERROR: {0}").format(x), - parent=parent) - for x in warnings: - self.pd.add_item_message(item_id, - timestamp, MSG_WARNING, - _("WARNING: {0}").format(x), - parent=parent) - for x in info: - self.pd.add_item_message(item_id, - timestamp, MSG_INFO, - _("{0}").format(x), - parent=parent) - - if needs_fix: - # Eliminate policy-based entries with no repair - # action. - for x in needs_fix: - if x[0] in repairs: - if x[1] is not None: - repairs[x[0]].append( - x[1]) - else: - if x[1] is None: - repairs[x[0]] = [] - else: - repairs[x[0]] = [x[1]] - - def __get_overlaying_act_from_cache(self, path, overlay_entries): - """Get overlaying action from overlay_entries cache.""" - - for e in overlay_entries[path]: - if e[2] == 'overlaying': - return e[0], e[1] - return None, None - - def __add_to_processed(self, oing_fmri, overlay, def_pkgs, pfmri, act, - errors, warnings, pinfo): - """Add newly processed actions results into cache.""" - - # If found overlaying package. - if oing_fmri: - # If the action is an overlaid one, attach the - # overlaying fmri for msg print. - if overlay == "overlaid": - def_pkgs[ - pfmri].append((act, errors, warnings, pinfo, - oing_fmri)) - # The overlaying action is itself. - else: - def_pkgs[ - pfmri].append((act, errors, warnings, pinfo, - None)) + path = os.path.normpath(os.path.join(orig_root, d)) + # Since the mntpoint is created solely for + # verify purpose, ignore it. 
+ if mntpoint and path == mntpoint: + continue + if path not in allentries or allentries[path] not in [ + "dir", + "link", + ]: + self.pd.add_item_message( + _("dir: {0}").format(path), + timestamp, + MSG_INFO, + _("Unpackaged directory"), + msg_type=MSG_UNPACKAGED, + parent="unpackaged", + ) else: - def_pkgs[ - pfmri].append((act, errors, warnings, pinfo, - None)) - - def __process_per_overlay_action(self, args, pfmri, entry, - def_pkgs, verifypaths, overlaypaths, overlay_entries, pt): - """Process per overlay action.""" - - act = entry[0] - overlay = entry[1] - path = act.attrs.get("path") - # Try the overlay_entries cache first. - oing_fmri, oing_act = \ - self.__get_overlaying_act_from_cache( - path, overlay_entries) - # If found, process it directly. - if oing_act: - act, errors, warnings, pinfo, \ - oing_fmri = \ - self.__process_verify_result( - act, pfmri, pt, - verifypaths=verifypaths, - overlaypaths=overlaypaths, - ovlying_fmri=oing_fmri, - ovlying_act=oing_act) - elif args: - # Not all fmris were processed, we need to find the - # overlaying action. - # - # Also need to collect newly found overlaying - # actions into overlay_entries if any. - act, errors, warnings, pinfo, oing_fmri = \ - self.__process_verify_result( - act, pfmri, pt, - verifypaths=verifypaths, - overlaypaths=overlaypaths, - ovly_entries=overlay_entries) - else: - # All fmris were processed, if the cache didn't contain - # the action, that means no overlaying action. - # - # We need to skip overlaying action finding, since we - # already know no overlaying action in the cache. - act, errors, warnings, pinfo, oing_fmri = \ - self.__process_verify_result( - act, pfmri, pt, - verifypaths=verifypaths, - overlaypaths=overlaypaths, - skip_ovlying=True) - self.__add_to_processed(oing_fmri, overlay, def_pkgs, pfmri, - act, errors, warnings, pinfo) - - def __check_attr_mismatch_between_actions(self, overlaid, overlaying): - """Check attribute mismatch between overlaying and overlaid - actions.""" - - overlaid_act = overlaid[1] - overlaying_act = overlaying[1] - o_attr_overlaid = overlaid_act.attrs.get("overlay-attributes") - o_attr_overlaying = overlaying_act.attrs.get( - "overlay-attributes") - owner_overlaid = overlaid_act.attrs["owner"] - owner_overlaying = overlaying_act.attrs["owner"] - mode_overlaid = overlaid_act.attrs["mode"] - mode_overlaying = overlaying_act.attrs["mode"] - group_overlaid = overlaid_act.attrs["group"] - group_overlaying = overlaying_act.attrs["group"] - - msgs = [] - if owner_overlaid != owner_overlaying: - msgs.append(_("owner: {0} does not match overlaid " - "package owner: {1}").format(owner_overlaying, - owner_overlaid)) - if mode_overlaid != mode_overlaying: - msgs.append(_("mode: {0} does not match overlaid " - "package mode: {1}").format(mode_overlaying, - mode_overlaid)) - if group_overlaid != group_overlaying: - msgs.append(_("group: {0} does not match overlaid " - "package group: {1}").format(group_overlaying, - group_overlaid)) - if not msgs: - return - - item_id = str(overlaying[0]) - act_id = overlaid_act.distinguished_name() - msg_level = MSG_INFO - result = "OK" - - if o_attr_overlaid == "deny" or o_attr_overlaying == "deny": - msg_level = MSG_ERROR - - # Check if there is already an FMRI-level message; - # update it or add a new one if necessary. 
- item_msgs = self.pd.get_parsable_item_messages() - added_msgs = None - if item_id in item_msgs and "messages" in item_msgs[item_id]: - added_msgs = item_msgs[item_id]["messages"] - add_msg = False - if added_msgs: - if (added_msgs[0]["msg_level"] == MSG_INFO and - msg_level == MSG_ERROR): - # Empty the current message list. - added_msgs[:] = [] - add_msg = True - else: - add_msg = True + newdirs.append(d) + dirs[:] = newdirs + for f in sorted(files): timestamp = misc.time_to_timestamp(time.time()) - if add_msg: - self.pd.add_item_message(item_id, timestamp, msg_level, - _("{pkg_name:70} {result:>7}").format( - pkg_name=overlaying[0].get_pkg_stem(), - result=result)) - self.pd.add_item_message(act_id, timestamp, msg_level, - _("Overlaid package: {0}").format( - overlaid[0].get_pkg_stem()), parent=item_id) - - for msg in msgs: - if msg_level == MSG_ERROR: - imsg = _("ERROR: {0}").format(msg) - else: - imsg = msg - self.pd.add_item_message(act_id, timestamp, msg_level, - imsg, parent=item_id) - - def __verify_fmris(self, repairs, args, proposed_fmris, pt, verifypaths, - overlaypaths): - """Verify FRMIs.""" - - path_only = bool(verifypaths or overlaypaths) - overlay_entries = {} - def_pkgs = {} # deferred packages - def_acts = {} # deferred actions - for pfmri in proposed_fmris: - entries = [] - needs_fix = [] - result = "OK" - failed = False - msg_level = MSG_INFO - - # Since every entry returned by verify might not be - # something needing repair, the relevant information - # for each package must be accumulated first to find - # an overall success/failure result and then the - # related messages output for it. - verify_path_count = len(verifypaths) - overlay_path_count = len(overlaypaths) - for act, errors, warnings, pinfo, overlay in \ - self.image.verify(pfmri, pt, - verifypaths=verifypaths, - overlaypaths=overlaypaths, verbose=True, - forever=True): - if not path_only and overlay: - path = act.attrs.get("path") - if path not in overlay_entries: - overlay_entries[path] = [] - overlay_entries[path].append( - (pfmri, act, overlay)) - - if pfmri not in def_acts: - def_acts[pfmri] = [] - def_acts[pfmri].append((act, overlay)) - else: - entries.append((act, errors, warnings, - pinfo, None)) - # Try to determine the package's status and - # message type. This is subject to change if - # the package contains overlay actions. - if errors: - failed = True - result = "ERROR" - msg_level = MSG_ERROR - # Some errors are based on policy (e.g. - # signature policy) and not a specific - # action, so act may be None. - needs_fix.append((pfmri, act)) - elif not failed and warnings: - result = "WARNING" - msg_level = MSG_WARNING - - # Defer final processing of package if verification was - # deferred for any of its actions. 
- if pfmri in def_acts: - def_pkgs[pfmri] = entries - continue + path = os.path.normpath(os.path.join(orig_root, f)) + if path not in allentries or allentries[path] not in [ + "file", + "link", + "hardlink", + ]: + self.pd.add_item_message( + _("file: {0}").format(path), + timestamp, + MSG_INFO, + _("Unpackaged file"), + msg_type=MSG_UNPACKAGED, + parent="unpackaged", + ) + + def __process_msgs( + self, entries, pfmri, msg_level, result, needs_fix, repairs + ): + """Generate plan message for verify result.""" + + timestamp = misc.time_to_timestamp(time.time()) + ffmri = str(pfmri) + self.pd.add_item_message( + ffmri, + timestamp, + msg_level, + _("{pkg_name:70} {result:>7}").format( + pkg_name=pfmri.get_pkg_stem(), result=result + ), + ) + timestamp = misc.time_to_timestamp(time.time()) + + for act, errors, warnings, info, oing_fmri in entries: + if act: + item_id = act.distinguished_name() + if oing_fmri: + item_id += " (from {0})".format( + oing_fmri.get_pkg_stem(anarchy=True) + ) + parent = ffmri + else: + item_id = ffmri + parent = None + for x in errors: + self.pd.add_item_message( + item_id, + timestamp, + MSG_ERROR, + _("ERROR: {0}").format(x), + parent=parent, + ) + for x in warnings: + self.pd.add_item_message( + item_id, + timestamp, + MSG_WARNING, + _("WARNING: {0}").format(x), + parent=parent, + ) + for x in info: + self.pd.add_item_message( + item_id, + timestamp, + MSG_INFO, + _("{0}").format(x), + parent=parent, + ) - if (path_only and verify_path_count == len(verifypaths) - and overlay_path_count == len(overlaypaths)): - # When verifying paths, omit packages without any - # matches from output. - continue - self.__process_msgs(entries, pfmri, msg_level, result, - needs_fix, repairs) - if path_only and not overlaypaths and not verifypaths: - return - - # No need to proceed for path only case. - if path_only: - return - - # Process deferred actions. - for pfmri, entries in def_acts.items(): - for entry in entries: - self.__process_per_overlay_action(args, pfmri, - entry, def_pkgs, verifypaths, - overlaypaths, overlay_entries, pt) - - # Generate messages for all processed packages with overlay - # actions. - for pfmri, entries in def_pkgs.items(): - failed = False - result = "OK" - msg_level = MSG_INFO - needs_fix = [] - for act, errors, warnings, pinfo, oing_fmri in entries: - # Try to determine the package's status and - # message type. - if errors: - failed = True - result = "ERROR" - msg_level = MSG_ERROR - if oing_fmri: - # Only append overlaying action - # if not all packages are - # verified. Otherwise, the - # overlaying action will be - # append later. - if args: - needs_fix.append(( - oing_fmri, act)) - else: - needs_fix.append((pfmri, act)) - elif not failed and warnings: - result = "WARNING" - msg_level = MSG_WARNING - - self.__process_msgs(entries, pfmri, msg_level, result, - needs_fix, repairs) - - # Generate overlay-specific messages. 
- for path, entries in overlay_entries.items(): - overlaid = None - overlaying = None - for e in entries: - if e[2] == "overlaid": - overlaid = e - elif e[2] == "overlaying": - overlaying = e - if overlaid and overlaying: - self.__check_attr_mismatch_between_actions( - overlaid, overlaying) - - def plan_fix(self, args, unpackaged=False, unpackaged_only=False, - verify_paths=misc.EmptyI): - """Determine the changes needed to fix the image.""" - - self.__plan_op() - self.__evaluate_excludes() - - pt = self.__progtrack - pt.plan_all_start() - - if args: - proposed_dict, self.__match_rm = self.__match_user_fmris( - self.image, args, self.MATCH_INST_VERSIONS) - - # merge patterns together - proposed_fixes = sorted(set([ - f - for each in proposed_dict.values() - for f in each - ])) + if needs_fix: + # Eliminate policy-based entries with no repair + # action. + for x in needs_fix: + if x[0] in repairs: + if x[1] is not None: + repairs[x[0]].append(x[1]) else: - # No FMRIs specified, verify all packages - proposed_fixes = list(self.image.gen_installed_pkgs( - ordered=True)) - - repairs = {} - overlaypaths = set() - verifypaths = set(a.lstrip(os.path.sep) for a in verify_paths) - - if not verify_paths: - pt.plan_start(pt.PLAN_PKG_VERIFY, goal=len(proposed_fixes)) - - # Verify unpackaged contents. - if unpackaged or unpackaged_only: - dup_be_name = "duplicate_livebe_for_verify" - try: - self.__process_unpackaged( - proposed_fixes, pt=pt, - dup_be_name=dup_be_name) - finally: - # Clean up the BE used for verify. - bootenv.BootEnv.cleanup_be(dup_be_name) - pt.plan_done(pt.PLAN_PKG_VERIFY) - if unpackaged_only: - self.__finish_plan(plandesc.EVALUATED_PKGS) - return - # Otherwise we reset the goals for packaged - # contents. - pt.plan_start(pt.PLAN_PKG_VERIFY, goal=len( - proposed_fixes)) - self.__verify_fmris(repairs, args, proposed_fixes, pt, - verifypaths, overlaypaths) + if x[1] is None: + repairs[x[0]] = [] + else: + repairs[x[0]] = [x[1]] + + def __get_overlaying_act_from_cache(self, path, overlay_entries): + """Get overlaying action from overlay_entries cache.""" + + for e in overlay_entries[path]: + if e[2] == "overlaying": + return e[0], e[1] + return None, None + + def __add_to_processed( + self, oing_fmri, overlay, def_pkgs, pfmri, act, errors, warnings, pinfo + ): + """Add newly processed actions results into cache.""" + + # If found overlaying package. + if oing_fmri: + # If the action is an overlaid one, attach the + # overlaying fmri for msg print. + if overlay == "overlaid": + def_pkgs[pfmri].append( + (act, errors, warnings, pinfo, oing_fmri) + ) + # The overlaying action is itself. + else: + def_pkgs[pfmri].append((act, errors, warnings, pinfo, None)) + else: + def_pkgs[pfmri].append((act, errors, warnings, pinfo, None)) + + def __process_per_overlay_action( + self, + args, + pfmri, + entry, + def_pkgs, + verifypaths, + overlaypaths, + overlay_entries, + pt, + ): + """Process per overlay action.""" + + act = entry[0] + overlay = entry[1] + path = act.attrs.get("path") + # Try the overlay_entries cache first. + oing_fmri, oing_act = self.__get_overlaying_act_from_cache( + path, overlay_entries + ) + # If found, process it directly. + if oing_act: + ( + act, + errors, + warnings, + pinfo, + oing_fmri, + ) = self.__process_verify_result( + act, + pfmri, + pt, + verifypaths=verifypaths, + overlaypaths=overlaypaths, + ovlying_fmri=oing_fmri, + ovlying_act=oing_act, + ) + elif args: + # Not all fmris were processed, we need to find the + # overlaying action. 
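(Aside: a compact sketch of the per-path cache consulted just above. The entries use the same (fmri, action, role) shape that overlay_entries is given in this diff, and the overlaying action, when present, is verified in place of the overlaid one; pick_overlaying and the sample FMRIs are hypothetical illustrations, not pkg(5) API.)

def pick_overlaying(path, overlay_entries):
    """Return (fmri, action) of the overlaying entry for 'path', or
    (None, None) when nothing overlays it."""
    for fmri, action, role in overlay_entries.get(path, []):
        if role == "overlaying":
            return fmri, action
    return None, None


if __name__ == "__main__":
    entries = {
        "etc/app.conf": [
            ("pkg://base/app", {"overlay": "allow"}, "overlaid"),
            ("pkg://site/app-config", {"overlay": "true"}, "overlaying"),
        ]
    }
    print(pick_overlaying("etc/app.conf", entries))
    print(pick_overlaying("etc/motd", entries))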
+ # + # Also need to collect newly found overlaying + # actions into overlay_entries if any. + ( + act, + errors, + warnings, + pinfo, + oing_fmri, + ) = self.__process_verify_result( + act, + pfmri, + pt, + verifypaths=verifypaths, + overlaypaths=overlaypaths, + ovly_entries=overlay_entries, + ) + else: + # All fmris were processed, if the cache didn't contain + # the action, that means no overlaying action. + # + # We need to skip overlaying action finding, since we + # already know no overlaying action in the cache. + ( + act, + errors, + warnings, + pinfo, + oing_fmri, + ) = self.__process_verify_result( + act, + pfmri, + pt, + verifypaths=verifypaths, + overlaypaths=overlaypaths, + skip_ovlying=True, + ) + self.__add_to_processed( + oing_fmri, overlay, def_pkgs, pfmri, act, errors, warnings, pinfo + ) + + def __check_attr_mismatch_between_actions(self, overlaid, overlaying): + """Check attribute mismatch between overlaying and overlaid + actions.""" + + overlaid_act = overlaid[1] + overlaying_act = overlaying[1] + o_attr_overlaid = overlaid_act.attrs.get("overlay-attributes") + o_attr_overlaying = overlaying_act.attrs.get("overlay-attributes") + owner_overlaid = overlaid_act.attrs["owner"] + owner_overlaying = overlaying_act.attrs["owner"] + mode_overlaid = overlaid_act.attrs["mode"] + mode_overlaying = overlaying_act.attrs["mode"] + group_overlaid = overlaid_act.attrs["group"] + group_overlaying = overlaying_act.attrs["group"] + + msgs = [] + if owner_overlaid != owner_overlaying: + msgs.append( + _( + "owner: {0} does not match overlaid " "package owner: {1}" + ).format(owner_overlaying, owner_overlaid) + ) + if mode_overlaid != mode_overlaying: + msgs.append( + _( + "mode: {0} does not match overlaid " "package mode: {1}" + ).format(mode_overlaying, mode_overlaid) + ) + if group_overlaid != group_overlaying: + msgs.append( + _( + "group: {0} does not match overlaid " "package group: {1}" + ).format(group_overlaying, group_overlaid) + ) + if not msgs: + return + + item_id = str(overlaying[0]) + act_id = overlaid_act.distinguished_name() + msg_level = MSG_INFO + result = "OK" + + if o_attr_overlaid == "deny" or o_attr_overlaying == "deny": + msg_level = MSG_ERROR + + # Check if there is already an FMRI-level message; + # update it or add a new one if necessary. + item_msgs = self.pd.get_parsable_item_messages() + added_msgs = None + if item_id in item_msgs and "messages" in item_msgs[item_id]: + added_msgs = item_msgs[item_id]["messages"] + add_msg = False + if added_msgs: + if ( + added_msgs[0]["msg_level"] == MSG_INFO + and msg_level == MSG_ERROR + ): + # Empty the current message list. 
+ added_msgs[:] = [] + add_msg = True + else: + add_msg = True + + timestamp = misc.time_to_timestamp(time.time()) + if add_msg: + self.pd.add_item_message( + item_id, + timestamp, + msg_level, + _("{pkg_name:70} {result:>7}").format( + pkg_name=overlaying[0].get_pkg_stem(), result=result + ), + ) + self.pd.add_item_message( + act_id, + timestamp, + msg_level, + _("Overlaid package: {0}").format(overlaid[0].get_pkg_stem()), + parent=item_id, + ) + + for msg in msgs: + if msg_level == MSG_ERROR: + imsg = _("ERROR: {0}").format(msg) + else: + imsg = msg + self.pd.add_item_message( + act_id, timestamp, msg_level, imsg, parent=item_id + ) + + def __verify_fmris( + self, repairs, args, proposed_fmris, pt, verifypaths, overlaypaths + ): + """Verify FRMIs.""" + + path_only = bool(verifypaths or overlaypaths) + overlay_entries = {} + def_pkgs = {} # deferred packages + def_acts = {} # deferred actions + for pfmri in proposed_fmris: + entries = [] + needs_fix = [] + result = "OK" + failed = False + msg_level = MSG_INFO + + # Since every entry returned by verify might not be + # something needing repair, the relevant information + # for each package must be accumulated first to find + # an overall success/failure result and then the + # related messages output for it. + verify_path_count = len(verifypaths) + overlay_path_count = len(overlaypaths) + for act, errors, warnings, pinfo, overlay in self.image.verify( + pfmri, + pt, + verifypaths=verifypaths, + overlaypaths=overlaypaths, + verbose=True, + forever=True, + ): + if not path_only and overlay: + path = act.attrs.get("path") + if path not in overlay_entries: + overlay_entries[path] = [] + overlay_entries[path].append((pfmri, act, overlay)) + + if pfmri not in def_acts: + def_acts[pfmri] = [] + def_acts[pfmri].append((act, overlay)) else: - pt.plan_start(pt.PLAN_PKG_VERIFY, goal=len(verifypaths)) - - self.__verify_fmris(repairs, args, proposed_fixes, pt, - verifypaths, overlaypaths) - - timestamp = misc.time_to_timestamp(time.time()) - for path_not_found in verifypaths: - pt.plan_add_progress(pt.PLAN_PKG_VERIFY) - self.pd.add_item_message("path not found", - timestamp, MSG_WARNING, - _("{path} is not found in the image").format( - path=path_not_found)) - - if args and overlaypaths: - # Only perform verification for the rest of packages - # if FMRIs are provided and there are actions with - # overlay=allow found in those FMRIs. In the second - # pass, only look for actions with overlay=true. - pfixes = set(proposed_fixes) - path_fmri = [ - f - for f in self.image.gen_installed_pkgs( - ordered=True) - if f not in pfixes - ] - self.__verify_fmris(repairs, args, path_fmri, pt, - set(), overlaypaths) + entries.append((act, errors, warnings, pinfo, None)) + # Try to determine the package's status and + # message type. This is subject to change if + # the package contains overlay actions. + if errors: + failed = True + result = "ERROR" + msg_level = MSG_ERROR + # Some errors are based on policy (e.g. + # signature policy) and not a specific + # action, so act may be None. + needs_fix.append((pfmri, act)) + elif not failed and warnings: + result = "WARNING" + msg_level = MSG_WARNING + + # Defer final processing of package if verification was + # deferred for any of its actions. + if pfmri in def_acts: + def_pkgs[pfmri] = entries + continue + + if ( + path_only + and verify_path_count == len(verifypaths) + and overlay_path_count == len(overlaypaths) + ): + # When verifying paths, omit packages without any + # matches from output. 
+ continue + self.__process_msgs( + entries, pfmri, msg_level, result, needs_fix, repairs + ) + if path_only and not overlaypaths and not verifypaths: + return + + # No need to proceed for path only case. + if path_only: + return + + # Process deferred actions. + for pfmri, entries in def_acts.items(): + for entry in entries: + self.__process_per_overlay_action( + args, + pfmri, + entry, + def_pkgs, + verifypaths, + overlaypaths, + overlay_entries, + pt, + ) + # Generate messages for all processed packages with overlay + # actions. + for pfmri, entries in def_pkgs.items(): + failed = False + result = "OK" + msg_level = MSG_INFO + needs_fix = [] + for act, errors, warnings, pinfo, oing_fmri in entries: + # Try to determine the package's status and + # message type. + if errors: + failed = True + result = "ERROR" + msg_level = MSG_ERROR + if oing_fmri: + # Only append overlaying action + # if not all packages are + # verified. Otherwise, the + # overlaying action will be + # append later. + if args: + needs_fix.append((oing_fmri, act)) + else: + needs_fix.append((pfmri, act)) + elif not failed and warnings: + result = "WARNING" + msg_level = MSG_WARNING + + self.__process_msgs( + entries, pfmri, msg_level, result, needs_fix, repairs + ) + + # Generate overlay-specific messages. + for path, entries in overlay_entries.items(): + overlaid = None + overlaying = None + for e in entries: + if e[2] == "overlaid": + overlaid = e + elif e[2] == "overlaying": + overlaying = e + if overlaid and overlaying: + self.__check_attr_mismatch_between_actions(overlaid, overlaying) + + def plan_fix( + self, + args, + unpackaged=False, + unpackaged_only=False, + verify_paths=misc.EmptyI, + ): + """Determine the changes needed to fix the image.""" + + self.__plan_op() + self.__evaluate_excludes() + + pt = self.__progtrack + pt.plan_all_start() + + if args: + proposed_dict, self.__match_rm = self.__match_user_fmris( + self.image, args, self.MATCH_INST_VERSIONS + ) + + # merge patterns together + proposed_fixes = sorted( + set([f for each in proposed_dict.values() for f in each]) + ) + else: + # No FMRIs specified, verify all packages + proposed_fixes = list(self.image.gen_installed_pkgs(ordered=True)) + + repairs = {} + overlaypaths = set() + verifypaths = set(a.lstrip(os.path.sep) for a in verify_paths) + + if not verify_paths: + pt.plan_start(pt.PLAN_PKG_VERIFY, goal=len(proposed_fixes)) + + # Verify unpackaged contents. + if unpackaged or unpackaged_only: + dup_be_name = "duplicate_livebe_for_verify" + try: + self.__process_unpackaged( + proposed_fixes, pt=pt, dup_be_name=dup_be_name + ) + finally: + # Clean up the BE used for verify. + bootenv.BootEnv.cleanup_be(dup_be_name) pt.plan_done(pt.PLAN_PKG_VERIFY) - # If no repairs, finish the plan. - if not repairs: - self.__finish_plan(plandesc.EVALUATED_PKGS) - return - - # Repair anything we failed to verify. - pt.plan_start(pt.PLAN_PKG_FIX, goal=len(repairs)) - for fmri, actions in repairs.items(): - pt.plan_add_progress(pt.PLAN_PKG_FIX) - # Need to get all variants otherwise evaluating the - # pkgplan will fail in signature verification. 
- m = self.image.get_manifest(fmri, ignore_excludes=True) - pp = pkgplan.PkgPlan(self.image) - pp.propose_repair(fmri, m, actions, []) - pp.evaluate(self.__old_excludes, self.__new_excludes) - self.pd.pkg_plans.append(pp) - - pt.plan_done(pt.PLAN_PKG_FIX) - pt.plan_all_done() - self.__finish_plan(plandesc.EVALUATED_PKGS) - - def plan_noop(self): - """Create a plan that doesn't change the package contents of - the current image.""" - self.__plan_op() - self.pd._fmri_changes = [] - self.pd.state = plandesc.EVALUATED_PKGS - - @staticmethod - def __fmris2dict(fmri_list): - return dict([ - (f.pkg_name, f) - for f in fmri_list - ]) - - @staticmethod - def __dicts2fmrichanges(olddict, newdict): - return [ - (olddict.get(k, None), newdict.get(k, None)) - for k in set(list(olddict.keys()) + list(newdict.keys())) - ] - - def reboot_advised(self): - """Check if evaluated imageplan suggests a reboot""" - assert self.state >= plandesc.MERGED_OK - return self.pd._actuators.reboot_advised() - - def reboot_needed(self): - """Check if evaluated imageplan requires a reboot""" - assert self.pd.state >= plandesc.MERGED_OK - return self.pd._actuators.reboot_needed() - - def boot_archive_needed(self): - """True if boot archive needs to be rebuilt""" - assert self.pd.state >= plandesc.MERGED_OK - return self.pd._need_boot_archive - - def get_solver_errors(self): - """Returns a list of strings for all FMRIs evaluated by the - solver explaining why they were rejected. (All packages - found in solver's trim database.)""" - return self.pd.get_solver_errors() - - def get_plan(self, full=True): - if full: - return str(self) - - output = "" - for t in self.pd._fmri_changes: - output += "{0} -> {1}\n".format(*t) - return output - - def gen_new_installed_pkgs(self): - """Generates all the fmris which will be in the new image.""" - assert self.pd.state >= plandesc.EVALUATED_PKGS - fmri_set = set(self.image.gen_installed_pkgs()) - - for p in self.pd.pkg_plans: - p.update_pkg_set(fmri_set) - - for pfmri in fmri_set: - yield pfmri - - def __gen_only_new_installed_info(self): - """Generates fmri-manifest pairs for all packages which are - being installed (or fixed, etc.).""" - assert self.pd.state >= plandesc.EVALUATED_PKGS - - for p in self.pd.pkg_plans: - if p.destination_fmri: - assert p.destination_manifest - yield p.destination_fmri, p.destination_manifest + if unpackaged_only: + self.__finish_plan(plandesc.EVALUATED_PKGS) + return + # Otherwise we reset the goals for packaged + # contents. + pt.plan_start(pt.PLAN_PKG_VERIFY, goal=len(proposed_fixes)) + self.__verify_fmris( + repairs, args, proposed_fixes, pt, verifypaths, overlaypaths + ) + else: + pt.plan_start(pt.PLAN_PKG_VERIFY, goal=len(verifypaths)) + + self.__verify_fmris( + repairs, args, proposed_fixes, pt, verifypaths, overlaypaths + ) + + timestamp = misc.time_to_timestamp(time.time()) + for path_not_found in verifypaths: + pt.plan_add_progress(pt.PLAN_PKG_VERIFY) + self.pd.add_item_message( + "path not found", + timestamp, + MSG_WARNING, + _("{path} is not found in the image").format( + path=path_not_found + ), + ) - def __gen_outgoing_info(self): - """Generates fmri-manifest pairs for all the packages which are - being removed.""" - assert self.pd.state >= plandesc.EVALUATED_PKGS + if args and overlaypaths: + # Only perform verification for the rest of packages + # if FMRIs are provided and there are actions with + # overlay=allow found in those FMRIs. In the second + # pass, only look for actions with overlay=true. 
+ pfixes = set(proposed_fixes) + path_fmri = [ + f + for f in self.image.gen_installed_pkgs(ordered=True) + if f not in pfixes + ] + self.__verify_fmris( + repairs, args, path_fmri, pt, set(), overlaypaths + ) - for p in self.pd.pkg_plans: - if p.origin_fmri and \ - p.origin_fmri != p.destination_fmri: - assert p.origin_manifest - yield p.origin_fmri, p.origin_manifest - - def gen_new_installed_actions_bytype(self, atype, implicit_dirs=False): - """Generates actions of type 'atype' from the packages in the - future image.""" - - return self.__gen_star_actions_bytype(atype, - self.gen_new_installed_pkgs, implicit_dirs=implicit_dirs) - - def gen_only_new_installed_actions_bytype(self, atype, - implicit_dirs=False, excludes=misc.EmptyI): - """Generates actions of type 'atype' from packages being - installed.""" - - return self.__gen_star_actions_bytype_from_extant_manifests( - atype, self.__gen_only_new_installed_info, excludes, - implicit_dirs=implicit_dirs) - - def gen_outgoing_actions_bytype(self, atype, - implicit_dirs=False, excludes=misc.EmptyI): - """Generates actions of type 'atype' from packages being - removed (not necessarily actions being removed).""" - - return self.__gen_star_actions_bytype_from_extant_manifests( - atype, self.__gen_outgoing_info, excludes, - implicit_dirs=implicit_dirs) - - def __gen_star_actions_bytype(self, atype, generator, implicit_dirs=False): - """Generate installed actions of type 'atype' from the package - fmris emitted by 'generator'. If 'implicit_dirs' is True, then - when 'atype' is 'dir', directories only implicitly delivered - in the image will be emitted as well.""" - - assert self.pd.state >= plandesc.EVALUATED_PKGS - - # Don't bother accounting for implicit directories if we're not - # looking for them. + pt.plan_done(pt.PLAN_PKG_VERIFY) + # If no repairs, finish the plan. + if not repairs: + self.__finish_plan(plandesc.EVALUATED_PKGS) + return + + # Repair anything we failed to verify. + pt.plan_start(pt.PLAN_PKG_FIX, goal=len(repairs)) + for fmri, actions in repairs.items(): + pt.plan_add_progress(pt.PLAN_PKG_FIX) + # Need to get all variants otherwise evaluating the + # pkgplan will fail in signature verification. 
+ m = self.image.get_manifest(fmri, ignore_excludes=True) + pp = pkgplan.PkgPlan(self.image) + pp.propose_repair(fmri, m, actions, []) + pp.evaluate(self.__old_excludes, self.__new_excludes) + self.pd.pkg_plans.append(pp) + + pt.plan_done(pt.PLAN_PKG_FIX) + pt.plan_all_done() + self.__finish_plan(plandesc.EVALUATED_PKGS) + + def plan_noop(self): + """Create a plan that doesn't change the package contents of + the current image.""" + self.__plan_op() + self.pd._fmri_changes = [] + self.pd.state = plandesc.EVALUATED_PKGS + + @staticmethod + def __fmris2dict(fmri_list): + return dict([(f.pkg_name, f) for f in fmri_list]) + + @staticmethod + def __dicts2fmrichanges(olddict, newdict): + return [ + (olddict.get(k, None), newdict.get(k, None)) + for k in set(list(olddict.keys()) + list(newdict.keys())) + ] + + def reboot_advised(self): + """Check if evaluated imageplan suggests a reboot""" + assert self.state >= plandesc.MERGED_OK + return self.pd._actuators.reboot_advised() + + def reboot_needed(self): + """Check if evaluated imageplan requires a reboot""" + assert self.pd.state >= plandesc.MERGED_OK + return self.pd._actuators.reboot_needed() + + def boot_archive_needed(self): + """True if boot archive needs to be rebuilt""" + assert self.pd.state >= plandesc.MERGED_OK + return self.pd._need_boot_archive + + def get_solver_errors(self): + """Returns a list of strings for all FMRIs evaluated by the + solver explaining why they were rejected. (All packages + found in solver's trim database.)""" + return self.pd.get_solver_errors() + + def get_plan(self, full=True): + if full: + return str(self) + + output = "" + for t in self.pd._fmri_changes: + output += "{0} -> {1}\n".format(*t) + return output + + def gen_new_installed_pkgs(self): + """Generates all the fmris which will be in the new image.""" + assert self.pd.state >= plandesc.EVALUATED_PKGS + fmri_set = set(self.image.gen_installed_pkgs()) + + for p in self.pd.pkg_plans: + p.update_pkg_set(fmri_set) + + for pfmri in fmri_set: + yield pfmri + + def __gen_only_new_installed_info(self): + """Generates fmri-manifest pairs for all packages which are + being installed (or fixed, etc.).""" + assert self.pd.state >= plandesc.EVALUATED_PKGS + + for p in self.pd.pkg_plans: + if p.destination_fmri: + assert p.destination_manifest + yield p.destination_fmri, p.destination_manifest + + def __gen_outgoing_info(self): + """Generates fmri-manifest pairs for all the packages which are + being removed.""" + assert self.pd.state >= plandesc.EVALUATED_PKGS + + for p in self.pd.pkg_plans: + if p.origin_fmri and p.origin_fmri != p.destination_fmri: + assert p.origin_manifest + yield p.origin_fmri, p.origin_manifest + + def gen_new_installed_actions_bytype(self, atype, implicit_dirs=False): + """Generates actions of type 'atype' from the packages in the + future image.""" + + return self.__gen_star_actions_bytype( + atype, self.gen_new_installed_pkgs, implicit_dirs=implicit_dirs + ) + + def gen_only_new_installed_actions_bytype( + self, atype, implicit_dirs=False, excludes=misc.EmptyI + ): + """Generates actions of type 'atype' from packages being + installed.""" + + return self.__gen_star_actions_bytype_from_extant_manifests( + atype, + self.__gen_only_new_installed_info, + excludes, + implicit_dirs=implicit_dirs, + ) + + def gen_outgoing_actions_bytype( + self, atype, implicit_dirs=False, excludes=misc.EmptyI + ): + """Generates actions of type 'atype' from packages being + removed (not necessarily actions being removed).""" + + return 
self.__gen_star_actions_bytype_from_extant_manifests( + atype, + self.__gen_outgoing_info, + excludes, + implicit_dirs=implicit_dirs, + ) + + def __gen_star_actions_bytype(self, atype, generator, implicit_dirs=False): + """Generate installed actions of type 'atype' from the package + fmris emitted by 'generator'. If 'implicit_dirs' is True, then + when 'atype' is 'dir', directories only implicitly delivered + in the image will be emitted as well.""" + + assert self.pd.state >= plandesc.EVALUATED_PKGS + + # Don't bother accounting for implicit directories if we're not + # looking for them. + if implicit_dirs: + if atype != "dir": + implicit_dirs = False + else: + da = pkg.actions.directory.DirectoryAction + + for pfmri in generator(): + m = self.image.get_manifest(pfmri, ignore_excludes=True) + if implicit_dirs: + dirs = set() # Keep track of explicit dirs + for act in m.gen_actions_by_type( + atype, excludes=self.__new_excludes + ): if implicit_dirs: - if atype != "dir": - implicit_dirs = False - else: - da = pkg.actions.directory.DirectoryAction - - for pfmri in generator(): - m = self.image.get_manifest(pfmri, ignore_excludes=True) - if implicit_dirs: - dirs = set() # Keep track of explicit dirs - for act in m.gen_actions_by_type(atype, - excludes=self.__new_excludes): - if implicit_dirs: - dirs.add(act.attrs["path"]) - yield act, pfmri - if implicit_dirs: - for d in m.get_directories(self.__new_excludes): - if d not in dirs: - yield da(path=d, implicit="true"), pfmri - - def __gen_star_actions_bytype_from_extant_manifests(self, atype, - generator, excludes, implicit_dirs=False): - """Generate installed actions of type 'atype' from the package - manifests emitted by 'generator'. 'excludes' is a list of - variants and facets which should be excluded from the actions - generated. If 'implicit_dirs' is True, then when 'atype' is - 'dir', directories only implicitly delivered in the image will - be emitted as well.""" - - assert self.pd.state >= plandesc.EVALUATED_PKGS - - # Don't bother accounting for implicit directories if we're not - # looking for them. + dirs.add(act.attrs["path"]) + yield act, pfmri + if implicit_dirs: + for d in m.get_directories(self.__new_excludes): + if d not in dirs: + yield da(path=d, implicit="true"), pfmri + + def __gen_star_actions_bytype_from_extant_manifests( + self, atype, generator, excludes, implicit_dirs=False + ): + """Generate installed actions of type 'atype' from the package + manifests emitted by 'generator'. 'excludes' is a list of + variants and facets which should be excluded from the actions + generated. If 'implicit_dirs' is True, then when 'atype' is + 'dir', directories only implicitly delivered in the image will + be emitted as well.""" + + assert self.pd.state >= plandesc.EVALUATED_PKGS + + # Don't bother accounting for implicit directories if we're not + # looking for them. 
+ if implicit_dirs: + if atype != "dir": + implicit_dirs = False + else: + da = pkg.actions.directory.DirectoryAction + + for pfmri, m in generator(): + if implicit_dirs: + dirs = set() # Keep track of explicit dirs + for act in m.gen_actions_by_type(atype, excludes=excludes): if implicit_dirs: - if atype != "dir": - implicit_dirs = False - else: - da = pkg.actions.directory.DirectoryAction - - for pfmri, m in generator(): - if implicit_dirs: - dirs = set() # Keep track of explicit dirs - for act in m.gen_actions_by_type(atype, - excludes=excludes): - if implicit_dirs: - dirs.add(act.attrs["path"]) - yield act, pfmri - - if implicit_dirs: - for d in m.get_directories(excludes): - if d not in dirs: - yield da(path=d, - implicit="true"), pfmri - - def __get_directories(self): - """ return set of all directories in target image """ - # always consider var and the image directory fixed in image... - if self.__directories == None: - # It's faster to build a large set and make a small - # update to it than to do the reverse. - dirs = set(( - os.path.normpath(d[0].attrs["path"]) - for d in self.gen_new_installed_actions_bytype("dir", - implicit_dirs=True) - )) - dirs.update([ - self.image.imgdir.rstrip("/"), - "var", - "var/sadm", - "var/sadm/install" - ]) - self.__directories = dirs - return self.__directories - - def __get_symlinks(self): - """ return a set of all symlinks in target image""" - if self.__symlinks == None: - self.__symlinks = set(( - a.attrs["path"] - for a, pfmri in self.gen_new_installed_actions_bytype("link") - )) - return self.__symlinks - - def __get_hardlinks(self): - """ return a set of all hardlinks in target image""" - if self.__hardlinks == None: - self.__hardlinks = set(( - a.attrs["path"] - for a, pfmri in self.gen_new_installed_actions_bytype("hardlink") - )) - return self.__hardlinks - - def __get_licenses(self): - """ return a set of all licenses in target image""" - if self.__licenses == None: - self.__licenses = set(( - a.attrs["license"] - for a, pfmri in self.gen_new_installed_actions_bytype("license") - )) - return self.__licenses - - def __get_legacy(self): - """ return a set of all legacy actions in target image""" - if self.__legacy == None: - self.__legacy = set(( - a.attrs["pkg"] - for a, pfmri in self.gen_new_installed_actions_bytype("legacy") - )) - return self.__legacy - - @staticmethod - def __check_inconsistent_types(actions, oactions): - """Check whether multiple action types within a namespace group - deliver to a given name in that space.""" - - ntypes = set((a[0].name for a in actions)) - otypes = set((a[0].name for a in oactions)) - - # We end up with nothing at this path, or start and end with one - # of the same type, or we just add one type to an empty path. - if len(ntypes) == 0 or (len(ntypes) == 1 and len(otypes) <= 1): - return None - - # We have fewer types, so actions are getting removed. - if len(ntypes) < len(otypes): - # If we still end up in a broken state, signal the - # caller that we should move forward, but not remove - # anything at this path. Note that the type on the - # filesystem may not match any of the remaining types. - if len(ntypes) > 1: - return "nothing", None - - assert len(ntypes) == 1 - - # If we end up in a sane state, signal the caller that - # we should make sure the right contents are in place. - # This implies that the actions remove() method should - # handle when the action isn't present. 
- if actions[0][0].name != "dir": - return "fixup", actions[0] - - # If we end up with a directory, then we need to be - # careful to choose a non-implicit directory as the - # fixup action. - for a in actions: - if "implicit" not in a[0].attrs: - return "fixup", a - else: - # If we only have implicit directories left, - # make up the rest of the attributes. - a[0].attrs.update({"mode": "0755", "owner": - "root", "group": "root"}) - return "fixup", a - - # If the broken packages remain unchanged across the plan, then - # we can ignore it. We just check that the packages haven't - # changed. - sort_key = operator.itemgetter(1) - actions = sorted(actions, key=sort_key) - oactions = sorted(oactions, key=sort_key) - if ntypes == otypes and \ - all(o[1] == n[1] for o, n in zip(oactions, actions)): - return "nothing", None - - return "error", actions - - @staticmethod - def __check_duplicate_actions(actions, oactions): - """Check whether we deliver more than one action with a given - key attribute value if only a single action of that type and - value may be delivered.""" - - # We end up with no actions or start with one or none and end - # with exactly one. - if len(actions) == 0 or (len(oactions) <= len(actions) == 1): - if (len(oactions) > 1 and - any(a[0].attrs.get("overlay") == "true" - for a in oactions)): - # If more than one action is being removed and - # one of them is an overlay, then suppress - # removal of the overlaid actions (if any) to - # ensure preserve rules of overlay action apply. - return "overlay", None - return None - - # Removing actions. - if len(actions) < len(oactions): - # If any of the new actions is an overlay, suppress - # the removal of the overlaid action. - if any(a[0].attrs.get("overlay") == "true" - for a in actions): - return "overlay", None - - # If we still end up in a broken state, signal the - # caller that we should move forward, but not remove - # any actions. - if len(actions) > 1 or \ - any("preserve" in a[0].attrs for a in actions): - return "nothing", None - - # If we end up in a sane state, signal the caller that - # we should make sure the right contents are in place. - # This implies that the action's remove() method should - # handle when the action isn't present. - return "fixup", actions[0] - - # If the broken paths remain unchanged across the plan, then we - # can ignore it. We have to resort to stringifying the actions - # in order to sort them since the usual sort is much lighter - # weight. - oactions.sort(key=lambda x: str(x[0])) - actions.sort(key=lambda x: str(x[0])) - if len(oactions) == len(actions) and \ - all(o[0] == n[0] for o, n, in zip(oactions, actions)): - return "nothing", None - - # For file actions, delivery of two actions to a single point is - # permitted if: - # * there are only two actions in conflict - # * one action has 'preserve' set and 'overlay=allow' - # * the other action has 'overlay=true' - if len(actions) == 2: - overlayable = overlay = None - for act, ignored in actions: - if (act.name == "file" and - act.attrs.get("overlay") == "allow" and - "preserve" in act.attrs): - overlayable = act - elif (act.name == "file" and - act.attrs.get("overlay") == "true"): - overlay = act - if overlayable and overlay: - # Found both an overlayable action and the - # action that overlays it. 
- ignore = ["preserve"] - # If neither overlay nor overlayable action - # has "deny" set in "overlay-attributes" - if ("deny" not in overlay.attrlist( - "overlay-attributes") and "deny" not in - overlayable.attrlist( - "overlay-attributes")): - ignore.extend(["owner", "group", - "mode", "sysattr"]) - # Need to verify mismatched attributes between - # overlaying action and overlaid action in - # testsuite. - elif DebugValues[ - "broken-conflicting-action-handling"]: - ignore.extend(["owner", "group", - "mode", "sysattr"]) - errors = ImagePlan.__find_inconsistent_attrs( - actions, ignore=ignore) - if errors: - # overlay is not permitted if unique - # attributes (except 'preserve') are - # inconsistent - return ("error", actions, - api_errors.InconsistentActionAttributeError) - return "overlay", None - - return "error", actions - - @staticmethod - def __find_inconsistent_attrs(actions, ignore=misc.EmptyI): - """Find all the problem Action pairs. - - 'ignore' is an optional list of attributes to ignore when - checking for inconsistent attributes. By default, all - attributes listed in the 'unique_attrs' property of an - Action are checked. - """ - - # We iterate over all pairs of actions to see if any conflict - # with the rest. If two actions are "safe" together, then we - # can ignore one of them for the rest of the run, since we can - # compare the rest of the actions against just one copy of - # essentially identical actions. - seen = set() - problems = [] - for a1, a2 in itertools.combinations(actions, 2): - # Implicit directories don't contribute to problems. - if a1[0].name == "dir" and "implicit" in a1[0].attrs: - continue - - if a2 in seen: - continue - - # Find the attributes which are different between the - # two actions, and if there are none, skip the action. - # We have to treat "implicit" specially for implicit - # directories because none of the attributes except for - # "path" will exist. - diffs = a1[0].differences(a2[0]) - if not diffs or "implicit" in diffs: - seen.add(a2) - continue - - # If none of the different attributes is one that must - # be identical, then we can skip this action. - if not any( - d for d in diffs - if (d in a1[0].unique_attrs and - d not in ignore)): - seen.add(a2) - continue - - if ((a1[0].name == "link" or a1[0].name == "hardlink") and - (a1[0].attrs.get("mediator") == a2[0].attrs.get("mediator")) and - (a1[0].attrs.get("mediator-version") != a2[0].attrs.get("mediator-version") or - a1[0].attrs.get("mediator-implementation") != a2[0].attrs.get("mediator-implementation"))): - # If two links share the same mediator and have - # different mediator versions and/or - # implementations, then permit them to collide. - # The imageplan will select which ones to remove - # and install based on the mediator configuration - # in the image. - seen.add(a2) - continue - - problems.append((a1, a2)) - - return problems - - @staticmethod - def __check_inconsistent_attrs(actions, oactions): - """Check whether we have non-identical actions delivering to the - same point in their namespace.""" - - nproblems = ImagePlan.__find_inconsistent_attrs(actions) - oproblems = ImagePlan.__find_inconsistent_attrs(oactions) - - # If we end up with more problems than we started with, we - # should error out. If we end up with the same number as - # before, then we simply leave things alone. And if we end up - # with fewer, then we try to clean up. 
- if len(nproblems) > len(oproblems): - return "error", actions - elif not nproblems and not oproblems: - return - elif len(nproblems) == len(oproblems): - return "nothing", None - else: - if actions[0][0].name != "dir": - return "fixup", actions[0] - - # Find a non-implicit directory action to use - for a in actions: - if "implicit" not in a[0].attrs: - return "fixup", a - else: - return "nothing", None - - def __propose_fixup(self, inst_action, rem_action, pfmri): - """Add to the current plan a pseudo repair plan to fix up - correctable conflicts.""" - - pp, install, remove = self.__fixups.get(pfmri, - (None, None, None)) - if pp is None: - pp = pkgplan.PkgPlan(self.image) - if inst_action: - install = [inst_action] - remove = [] - else: - install = [] - remove = [rem_action] - self.__fixups[pfmri] = pp, install, remove - elif inst_action: - install.append(inst_action) - else: - remove.append(rem_action) - - def __evaluate_fixups(self): - nfm = manifest.NullFactoredManifest - for pfmri, (pp, install, remove) in self.__fixups.items(): - pp.propose_repair(pfmri, nfm, install, remove, - autofix=True) - pp.evaluate(self.__old_excludes, self.__new_excludes) - self.pd.pkg_plans.append(pp) - - # Repairs end up going into the package plan's update - # and remove lists, so _ActionPlans needed to be - # appended for each action in this fixup pkgplan to - # the list of related actions. - for action in install: - if 'path' in action.attrs and \ - self.__check_excluded(action.attrs['path']): - continue - self.pd.update_actions.append( - _ActionPlan(pp, None, action)) - for action in remove: - if 'path' in action.attrs and \ - self.__check_excluded(action.attrs['path']): - continue - self.pd.removal_actions.append( - _ActionPlan(pp, action, None)) - - # Don't process this particular set of fixups again. - self.__fixups = {} - - def __process_conflicts(self, key, func, actions, oactions, errclass, errs): - """The conflicting action checking functions all need to be - dealt with in a similar fashion, so we do that work in one - place.""" - - ret = func(actions, oactions) - if ret is None: - return False - - if len(ret) == 3: - # Allow checking functions to override default errclass. - msg, actions, errclass = ret - else: - msg, actions = ret - - if not isinstance(msg, six.string_types): - return False - - if msg == "nothing": - for i, ap in enumerate(self.pd.removal_actions): - if ap and ap.src.attrs.get(ap.src.key_attr, - None) == key: - self.pd.removal_actions[i] = None - elif msg == "overlay": - pp_needs_trimming = {} - moved = set() - # Suppress install and update of overlaid file. - for al in (self.pd.install_actions, - self.pd.update_actions): - for i, ap in enumerate(al): - if not ap: - # Action has been removed. - continue - - attrs = ap.dst.attrs - if attrs.get(ap.dst.key_attr) != key: - if ("preserve" in attrs and - "original_name" in attrs): - # Possible move to a - # different location for - # editable file. - # Overlay attribute is - # not checked in case it - # was dropped as part of - # move. - moved.add( - attrs["original_name"]) - continue - - if attrs.get("overlay") != "allow": - # Only care about overlaid - # actions. - continue - - # Remove conflicting, overlaid actions - # from plan. - al[i] = None - pp_needs_trimming.setdefault(id(ap.p), - { "plan": ap.p, "trim": [] }) - pp_needs_trimming[id(ap.p)]["trim"].append( - id(ap.dst)) - break - - # Suppress removal of overlaid file. 
- al = self.pd.removal_actions - for i, ap in enumerate(al): - if not ap: - continue - - attrs = ap.src.attrs - if not attrs.get(ap.src.key_attr) == key: - continue - - if attrs.get("overlay") != "allow": - # Only interested in overlaid actions. - continue - - orig_name = attrs.get("original_name", - "{0}:{1}".format(ap.p.origin_fmri.get_name(), - attrs["path"])) - if orig_name in moved: - # File has moved locations; removal will - # be executed, but file will be saved - # for the move skipping unlink. - ap.src.attrs["save_file"] = \ - [orig_name, "false"] - break - - al[i] = None - pp_needs_trimming.setdefault(id(ap.p), - { "plan": ap.p, "trim": [] }) - pp_needs_trimming[id(ap.p)]["trim"].append( - id(ap.src)) - break - - for entry in pp_needs_trimming.values(): - p = entry["plan"] - trim = entry["trim"] - # Can't modify the p.actions tuple, so modify - # the added member in-place. - for prop in ("added", "changed", "removed"): - pval = getattr(p.actions, prop) - pval[:] = [ - a - for a in pval - if id(a[1]) not in trim - ] - elif msg == "fixup": - self.__propose_fixup(actions[0], None, actions[1]) - elif msg == "error": - errs.append(errclass(actions)) - else: - assert False, "{0}() returned something other than " \ - "'nothing', 'overlay', 'error', or 'fixup': '{1}'".format( - func.__name__, msg) - + dirs.add(act.attrs["path"]) + yield act, pfmri + + if implicit_dirs: + for d in m.get_directories(excludes): + if d not in dirs: + yield da(path=d, implicit="true"), pfmri + + def __get_directories(self): + """return set of all directories in target image""" + # always consider var and the image directory fixed in image... + if self.__directories == None: + # It's faster to build a large set and make a small + # update to it than to do the reverse. + dirs = set( + ( + os.path.normpath(d[0].attrs["path"]) + for d in self.gen_new_installed_actions_bytype( + "dir", implicit_dirs=True + ) + ) + ) + dirs.update( + [ + self.image.imgdir.rstrip("/"), + "var", + "var/sadm", + "var/sadm/install", + ] + ) + self.__directories = dirs + return self.__directories + + def __get_symlinks(self): + """return a set of all symlinks in target image""" + if self.__symlinks == None: + self.__symlinks = set( + ( + a.attrs["path"] + for a, pfmri in self.gen_new_installed_actions_bytype( + "link" + ) + ) + ) + return self.__symlinks + + def __get_hardlinks(self): + """return a set of all hardlinks in target image""" + if self.__hardlinks == None: + self.__hardlinks = set( + ( + a.attrs["path"] + for a, pfmri in self.gen_new_installed_actions_bytype( + "hardlink" + ) + ) + ) + return self.__hardlinks + + def __get_licenses(self): + """return a set of all licenses in target image""" + if self.__licenses == None: + self.__licenses = set( + ( + a.attrs["license"] + for a, pfmri in self.gen_new_installed_actions_bytype( + "license" + ) + ) + ) + return self.__licenses + + def __get_legacy(self): + """return a set of all legacy actions in target image""" + if self.__legacy == None: + self.__legacy = set( + ( + a.attrs["pkg"] + for a, pfmri in self.gen_new_installed_actions_bytype( + "legacy" + ) + ) + ) + return self.__legacy + + @staticmethod + def __check_inconsistent_types(actions, oactions): + """Check whether multiple action types within a namespace group + deliver to a given name in that space.""" + + ntypes = set((a[0].name for a in actions)) + otypes = set((a[0].name for a in oactions)) + + # We end up with nothing at this path, or start and end with one + # of the same type, or we just add one type to an empty 
path. + if len(ntypes) == 0 or (len(ntypes) == 1 and len(otypes) <= 1): + return None + + # We have fewer types, so actions are getting removed. + if len(ntypes) < len(otypes): + # If we still end up in a broken state, signal the + # caller that we should move forward, but not remove + # anything at this path. Note that the type on the + # filesystem may not match any of the remaining types. + if len(ntypes) > 1: + return "nothing", None + + assert len(ntypes) == 1 + + # If we end up in a sane state, signal the caller that + # we should make sure the right contents are in place. + # This implies that the actions remove() method should + # handle when the action isn't present. + if actions[0][0].name != "dir": + return "fixup", actions[0] + + # If we end up with a directory, then we need to be + # careful to choose a non-implicit directory as the + # fixup action. + for a in actions: + if "implicit" not in a[0].attrs: + return "fixup", a + else: + # If we only have implicit directories left, + # make up the rest of the attributes. + a[0].attrs.update( + {"mode": "0755", "owner": "root", "group": "root"} + ) + return "fixup", a + + # If the broken packages remain unchanged across the plan, then + # we can ignore it. We just check that the packages haven't + # changed. + sort_key = operator.itemgetter(1) + actions = sorted(actions, key=sort_key) + oactions = sorted(oactions, key=sort_key) + if ntypes == otypes and all( + o[1] == n[1] for o, n in zip(oactions, actions) + ): + return "nothing", None + + return "error", actions + + @staticmethod + def __check_duplicate_actions(actions, oactions): + """Check whether we deliver more than one action with a given + key attribute value if only a single action of that type and + value may be delivered.""" + + # We end up with no actions or start with one or none and end + # with exactly one. + if len(actions) == 0 or (len(oactions) <= len(actions) == 1): + if len(oactions) > 1 and any( + a[0].attrs.get("overlay") == "true" for a in oactions + ): + # If more than one action is being removed and + # one of them is an overlay, then suppress + # removal of the overlaid actions (if any) to + # ensure preserve rules of overlay action apply. + return "overlay", None + return None + + # Removing actions. + if len(actions) < len(oactions): + # If any of the new actions is an overlay, suppress + # the removal of the overlaid action. + if any(a[0].attrs.get("overlay") == "true" for a in actions): + return "overlay", None + + # If we still end up in a broken state, signal the + # caller that we should move forward, but not remove + # any actions. + if len(actions) > 1 or any( + "preserve" in a[0].attrs for a in actions + ): + return "nothing", None + + # If we end up in a sane state, signal the caller that + # we should make sure the right contents are in place. + # This implies that the action's remove() method should + # handle when the action isn't present. + return "fixup", actions[0] + + # If the broken paths remain unchanged across the plan, then we + # can ignore it. We have to resort to stringifying the actions + # in order to sort them since the usual sort is much lighter + # weight. 
+ oactions.sort(key=lambda x: str(x[0])) + actions.sort(key=lambda x: str(x[0])) + if len(oactions) == len(actions) and all( + o[0] == n[0] for o, n, in zip(oactions, actions) + ): + return "nothing", None + + # For file actions, delivery of two actions to a single point is + # permitted if: + # * there are only two actions in conflict + # * one action has 'preserve' set and 'overlay=allow' + # * the other action has 'overlay=true' + if len(actions) == 2: + overlayable = overlay = None + for act, ignored in actions: + if ( + act.name == "file" + and act.attrs.get("overlay") == "allow" + and "preserve" in act.attrs + ): + overlayable = act + elif act.name == "file" and act.attrs.get("overlay") == "true": + overlay = act + if overlayable and overlay: + # Found both an overlayable action and the + # action that overlays it. + ignore = ["preserve"] + # If neither overlay nor overlayable action + # has "deny" set in "overlay-attributes" + if "deny" not in overlay.attrlist( + "overlay-attributes" + ) and "deny" not in overlayable.attrlist("overlay-attributes"): + ignore.extend(["owner", "group", "mode", "sysattr"]) + # Need to verify mismatched attributes between + # overlaying action and overlaid action in + # testsuite. + elif DebugValues["broken-conflicting-action-handling"]: + ignore.extend(["owner", "group", "mode", "sysattr"]) + errors = ImagePlan.__find_inconsistent_attrs( + actions, ignore=ignore + ) + if errors: + # overlay is not permitted if unique + # attributes (except 'preserve') are + # inconsistent + return ( + "error", + actions, + api_errors.InconsistentActionAttributeError, + ) + return "overlay", None + + return "error", actions + + @staticmethod + def __find_inconsistent_attrs(actions, ignore=misc.EmptyI): + """Find all the problem Action pairs. + + 'ignore' is an optional list of attributes to ignore when + checking for inconsistent attributes. By default, all + attributes listed in the 'unique_attrs' property of an + Action are checked. + """ + + # We iterate over all pairs of actions to see if any conflict + # with the rest. If two actions are "safe" together, then we + # can ignore one of them for the rest of the run, since we can + # compare the rest of the actions against just one copy of + # essentially identical actions. + seen = set() + problems = [] + for a1, a2 in itertools.combinations(actions, 2): + # Implicit directories don't contribute to problems. + if a1[0].name == "dir" and "implicit" in a1[0].attrs: + continue + + if a2 in seen: + continue + + # Find the attributes which are different between the + # two actions, and if there are none, skip the action. + # We have to treat "implicit" specially for implicit + # directories because none of the attributes except for + # "path" will exist. + diffs = a1[0].differences(a2[0]) + if not diffs or "implicit" in diffs: + seen.add(a2) + continue + + # If none of the different attributes is one that must + # be identical, then we can skip this action. + if not any( + d + for d in diffs + if (d in a1[0].unique_attrs and d not in ignore) + ): + seen.add(a2) + continue + + if ( + (a1[0].name == "link" or a1[0].name == "hardlink") + and (a1[0].attrs.get("mediator") == a2[0].attrs.get("mediator")) + and ( + a1[0].attrs.get("mediator-version") + != a2[0].attrs.get("mediator-version") + or a1[0].attrs.get("mediator-implementation") + != a2[0].attrs.get("mediator-implementation") + ) + ): + # If two links share the same mediator and have + # different mediator versions and/or + # implementations, then permit them to collide. 
+ # The imageplan will select which ones to remove + # and install based on the mediator configuration + # in the image. + seen.add(a2) + continue + + problems.append((a1, a2)) + + return problems + + @staticmethod + def __check_inconsistent_attrs(actions, oactions): + """Check whether we have non-identical actions delivering to the + same point in their namespace.""" + + nproblems = ImagePlan.__find_inconsistent_attrs(actions) + oproblems = ImagePlan.__find_inconsistent_attrs(oactions) + + # If we end up with more problems than we started with, we + # should error out. If we end up with the same number as + # before, then we simply leave things alone. And if we end up + # with fewer, then we try to clean up. + if len(nproblems) > len(oproblems): + return "error", actions + elif not nproblems and not oproblems: + return + elif len(nproblems) == len(oproblems): + return "nothing", None + else: + if actions[0][0].name != "dir": + return "fixup", actions[0] + + # Find a non-implicit directory action to use + for a in actions: + if "implicit" not in a[0].attrs: + return "fixup", a + else: + return "nothing", None + + def __propose_fixup(self, inst_action, rem_action, pfmri): + """Add to the current plan a pseudo repair plan to fix up + correctable conflicts.""" + + pp, install, remove = self.__fixups.get(pfmri, (None, None, None)) + if pp is None: + pp = pkgplan.PkgPlan(self.image) + if inst_action: + install = [inst_action] + remove = [] + else: + install = [] + remove = [rem_action] + self.__fixups[pfmri] = pp, install, remove + elif inst_action: + install.append(inst_action) + else: + remove.append(rem_action) + + def __evaluate_fixups(self): + nfm = manifest.NullFactoredManifest + for pfmri, (pp, install, remove) in self.__fixups.items(): + pp.propose_repair(pfmri, nfm, install, remove, autofix=True) + pp.evaluate(self.__old_excludes, self.__new_excludes) + self.pd.pkg_plans.append(pp) + + # Repairs end up going into the package plan's update + # and remove lists, so _ActionPlans needed to be + # appended for each action in this fixup pkgplan to + # the list of related actions. + for action in install: + if "path" in action.attrs and self.__check_excluded( + action.attrs["path"] + ): + continue + self.pd.update_actions.append(_ActionPlan(pp, None, action)) + for action in remove: + if "path" in action.attrs and self.__check_excluded( + action.attrs["path"] + ): + continue + self.pd.removal_actions.append(_ActionPlan(pp, action, None)) + + # Don't process this particular set of fixups again. + self.__fixups = {} + + def __process_conflicts(self, key, func, actions, oactions, errclass, errs): + """The conflicting action checking functions all need to be + dealt with in a similar fashion, so we do that work in one + place.""" + + ret = func(actions, oactions) + if ret is None: + return False + + if len(ret) == 3: + # Allow checking functions to override default errclass. + msg, actions, errclass = ret + else: + msg, actions = ret + + if not isinstance(msg, six.string_types): + return False + + if msg == "nothing": + for i, ap in enumerate(self.pd.removal_actions): + if ap and ap.src.attrs.get(ap.src.key_attr, None) == key: + self.pd.removal_actions[i] = None + elif msg == "overlay": + pp_needs_trimming = {} + moved = set() + # Suppress install and update of overlaid file. + for al in (self.pd.install_actions, self.pd.update_actions): + for i, ap in enumerate(al): + if not ap: + # Action has been removed. 
+ continue + + attrs = ap.dst.attrs + if attrs.get(ap.dst.key_attr) != key: + if "preserve" in attrs and "original_name" in attrs: + # Possible move to a + # different location for + # editable file. + # Overlay attribute is + # not checked in case it + # was dropped as part of + # move. + moved.add(attrs["original_name"]) + continue + + if attrs.get("overlay") != "allow": + # Only care about overlaid + # actions. + continue + + # Remove conflicting, overlaid actions + # from plan. + al[i] = None + pp_needs_trimming.setdefault( + id(ap.p), {"plan": ap.p, "trim": []} + ) + pp_needs_trimming[id(ap.p)]["trim"].append(id(ap.dst)) + break + + # Suppress removal of overlaid file. + al = self.pd.removal_actions + for i, ap in enumerate(al): + if not ap: + continue + + attrs = ap.src.attrs + if not attrs.get(ap.src.key_attr) == key: + continue + + if attrs.get("overlay") != "allow": + # Only interested in overlaid actions. + continue + + orig_name = attrs.get( + "original_name", + "{0}:{1}".format( + ap.p.origin_fmri.get_name(), attrs["path"] + ), + ) + if orig_name in moved: + # File has moved locations; removal will + # be executed, but file will be saved + # for the move skipping unlink. + ap.src.attrs["save_file"] = [orig_name, "false"] + break + + al[i] = None + pp_needs_trimming.setdefault( + id(ap.p), {"plan": ap.p, "trim": []} + ) + pp_needs_trimming[id(ap.p)]["trim"].append(id(ap.src)) + break + + for entry in pp_needs_trimming.values(): + p = entry["plan"] + trim = entry["trim"] + # Can't modify the p.actions tuple, so modify + # the added member in-place. + for prop in ("added", "changed", "removed"): + pval = getattr(p.actions, prop) + pval[:] = [a for a in pval if id(a[1]) not in trim] + elif msg == "fixup": + self.__propose_fixup(actions[0], None, actions[1]) + elif msg == "error": + errs.append(errclass(actions)) + else: + assert False, ( + "{0}() returned something other than " + "'nothing', 'overlay', 'error', or 'fixup': '{1}'".format( + func.__name__, msg + ) + ) + + return True + + def __seed(self, gen_func, action_classes, excludes): + """Build a mapping from action keys to action, pfmri tuples for + a set of action types. + + The 'gen_func' is a function which takes an action type and + 'implicit_dirs' and returns action-pfmri pairs. + + The 'action_classes' parameter is a list of action types.""" + + d = {} + for klass in action_classes: + self.__progtrack.plan_add_progress( + self.__progtrack.PLAN_ACTION_CONFLICT + ) + for a, pfmri in gen_func( + klass.name, implicit_dirs=True, excludes=excludes + ): + d.setdefault(a.attrs[klass.key_attr], []).append((a, pfmri)) + return d + + @staticmethod + def __act_dup_check(tgt, key, actstr, fmristr): + """Check for duplicate actions/fmri tuples in 'tgt', which is + indexed by 'key'.""" + + # + # When checking for duplicate actions we have to account for + # the fact that actions which are part of a package plan are + # not stripped. But the actions we're iterating over here are + # coming from the stripped action cache, so they have had + # assorted attributes removed (like variants, facets, etc.) So + # to check for duplicates we have to make sure to strip the + # actions we're comparing against. Of course we can't just + # strip the actions which are part of a package plan because + # we could be removing data critical to the execution of that + # action like original_name, etc. So before we strip an + # action we have to make a copy of it. 
+ # + # If we're dealing with a directory action and an "implicit" + # attribute exists, we need to preserve it. We assume it's a + # synthetic attribute that indicates that the action was + # created implicitly (and hence won't conflict with an + # explicit directory action defining the same directory). + # Note that we've assumed that no one will ever add an + # explicit "implicit" attribute to a directory action. + # + preserve = {"dir": ["implicit"]} + if key not in tgt: + return False + for act, pfmri in tgt[key]: + # check the fmri first since that's easy + if fmristr != str(pfmri): + continue + act = pkg.actions.fromstr(str(act)) + act.strip(preserve=preserve) + if actstr == str(act): return True - - def __seed(self, gen_func, action_classes, excludes): - """Build a mapping from action keys to action, pfmri tuples for - a set of action types. - - The 'gen_func' is a function which takes an action type and - 'implicit_dirs' and returns action-pfmri pairs. - - The 'action_classes' parameter is a list of action types.""" - - d = {} - for klass in action_classes: - self.__progtrack.plan_add_progress( - self.__progtrack.PLAN_ACTION_CONFLICT) - for a, pfmri in \ - gen_func(klass.name, implicit_dirs=True, - excludes=excludes): - d.setdefault(a.attrs[klass.key_attr], - []).append((a, pfmri)) - return d - - @staticmethod - def __act_dup_check(tgt, key, actstr, fmristr): - """Check for duplicate actions/fmri tuples in 'tgt', which is - indexed by 'key'.""" - - # - # When checking for duplicate actions we have to account for - # the fact that actions which are part of a package plan are - # not stripped. But the actions we're iterating over here are - # coming from the stripped action cache, so they have had - # assorted attributes removed (like variants, facets, etc.) So - # to check for duplicates we have to make sure to strip the - # actions we're comparing against. Of course we can't just - # strip the actions which are part of a package plan because - # we could be removing data critical to the execution of that - # action like original_name, etc. So before we strip an - # action we have to make a copy of it. - # - # If we're dealing with a directory action and an "implicit" - # attribute exists, we need to preserve it. We assume it's a - # synthetic attribute that indicates that the action was - # created implicitly (and hence won't conflict with an - # explicit directory action defining the same directory). - # Note that we've assumed that no one will ever add an - # explicit "implicit" attribute to a directory action. - # - preserve = {"dir": ["implicit"]} - if key not in tgt: - return False - for act, pfmri in tgt[key]: - # check the fmri first since that's easy - if fmristr != str(pfmri): - continue - act = pkg.actions.fromstr(str(act)) - act.strip(preserve=preserve) - if actstr == str(act): - return True - return False - - def __update_act(self, keys, tgt, skip_dups, offset_dict, - action_classes, sf, skip_fmris, fmri_dict): - """Update 'tgt' with action/fmri pairs from the stripped - action cache that are associated with the specified action - 'keys'. - - The 'skip_dups' parameter indicates if we should avoid adding - duplicate action/pfmri pairs into 'tgt'. - - The 'offset_dict' parameter contains a mapping from key to - offsets into the actions.stripped file and the number of lines - to read. - - The 'action_classes' parameter contains the list of action types - where one action can conflict with another action. 
- - The 'sf' parameter is the actions.stripped file from which we - read the actual actions indicated by the offset dictionary - 'offset_dict.' - - The 'skip_fmris' parameter contains a set of strings - representing the packages which we should not process actions - for. - - The 'fmri_dict' parameter is a cache of previously built PkgFmri - objects which is used so the same string isn't translated into - the same PkgFmri object multiple times.""" - - for key in keys: - offsets = [] - for klass in action_classes: - offset = offset_dict.get((klass.name, key), - None) - if offset is not None: - offsets.append(offset) - - for offset, cnt in offsets: - sf.seek(offset) - pns = None - i = 0 - while 1: - # sf is reading in binary mode - line = misc.force_str(sf.readline()) - i += 1 - if i > cnt: - break - line = line.rstrip() - if line == "": - break - fmristr, actstr = line.split(None, 1) - if fmristr in skip_fmris: - continue - act = pkg.actions.fromstr(actstr) - if act.attrs[act.key_attr] != key: - raise api_errors.InvalidPackageErrors([ - "{} has invalid manifest " - "line:".format( - fmristr), - " '{}'".format(actstr), - " '{}' vs. '{}'".format( - act.attrs[act.key_attr], - key - ) - ]) - assert pns is None or \ - act.namespace_group == pns - pns = act.namespace_group - - try: - pfmri = fmri_dict[fmristr] - except KeyError: - pfmri = pkg.fmri.PkgFmri( - fmristr) - fmri_dict[fmristr] = pfmri - if skip_dups and self.__act_dup_check( - tgt, key, actstr, fmristr): - continue - tgt.setdefault(key, []).append( - (act, pfmri)) - - def __fast_check(self, new, old, ns): - """Check whether actions being added and removed are - sufficiently similar that further conflict checking on those - actions isn't needed. - - The 'new' parameter is a dictionary mapping keys to the incoming - actions with that as a key. The incoming actions are actions - delivered by packages which are being installed or updated to. - - The 'old' parameter is a dictionary mapping keys to the outgoing - actions with that as a key. The outgoing actions are actions - delivered by packages which are being removed or updated from. - - The 'ns' parameter is the action namespace for the actions in - 'new' and 'old'.""" - - # .keys() is being used because we're removing keys from the - # dictionary as we go. - for key in list(new.keys()): - actions = new[key] - assert len(actions) > 0 - oactions = old.get(key, []) - # If new actions are being installed, then we need to do - # the full conflict checking. - if not oactions: - continue - - unmatched_old_actions = set(range(0, len(oactions))) - - # If the action isn't refcountable and there's more than - # one action, that's an error so we let - # __check_conflicts handle it. - entry = actions[0][0] - if not entry.refcountable and \ - entry.globally_identical and \ - len(actions) > 1: - continue - - # Check that each incoming action has a match in the - # outgoing actions. - next_key = False - for act, pfmri in actions: - matched = False - aname = act.name - aattrs = act.attrs - # Compare this action with each outgoing action. - for i, (oact, opfmri) in enumerate(oactions): - if aname != oact.name: - continue - # Check whether all attributes which - # need to be unique are identical for - # these two actions. - oattrs = oact.attrs - if all(( - aattrs.get(a) == oattrs.get(a) - for a in act.unique_attrs - )): - matched = True - break - - # If this action didn't have a match in the old - # action, then this key needs full conflict - # checking so move on to the next key. 
- if not matched: - next_key = True - break - unmatched_old_actions.discard(i) - if next_key: - continue - - # Check that each outgoing action has a match in the - # incoming actions. - for i, (oact, opfmri) in enumerate(oactions): - if i not in unmatched_old_actions: - continue - matched = False - for act, pfmri in actions: - if act.name != oact.name: - continue - if all(( - act.attrs.get(a) == - oact.attrs.get(a) - for a in act.unique_attrs - )): - matched = True - break - if not matched: - next_key = True - break - unmatched_old_actions.discard(i) - if next_key or unmatched_old_actions: - continue - # We know that each incoming action matches at least one - # outgoing action and each outgoing action matches at - # least one incoming action, so no further conflict - # checking is needed. - del new[key] - del old[key] - - # .keys() is being used because we're removing keys from the - # dictionary as we go. - for key in list(old.keys()): - # If actions that aren't in conflict are being removed, - # then nothing more needs to be done. - if key not in new: - del old[key] - - def __check_conflicts(self, new, old, action_classes, ns, - errs): - """Check all the newly installed actions for conflicts with - existing actions.""" - - for key, actions in six.iteritems(new): - oactions = old.get(key, []) - - self.__progtrack.plan_add_progress( - self.__progtrack.PLAN_ACTION_CONFLICT) - - if len(actions) == 1 and len(oactions) < 2: - continue - - # Actions delivering to the same point in a - # namespace group's namespace should have the - # same type. - if type(ns) != int: - if self.__process_conflicts(key, - self.__check_inconsistent_types, - actions, oactions, - api_errors.InconsistentActionTypeError, - errs): - continue - - # By virtue of the above check, all actions at - # this point in this namespace are the same. - assert(len(set(a[0].name for a in actions)) <= 1) - assert(len(set(a[0].name for a in oactions)) <= 1) - - # Multiple non-refcountable actions delivered to - # the same name is an error. - entry = actions[0][0] - if not entry.refcountable and entry.globally_identical: - if self.__process_conflicts(key, - self.__check_duplicate_actions, - actions, oactions, - api_errors.DuplicateActionError, - errs): - continue - - # Multiple refcountable but globally unique - # actions delivered to the same name must be - # identical. - elif entry.globally_identical: - if self.__process_conflicts(key, - self.__check_inconsistent_attrs, - actions, oactions, - api_errors.InconsistentActionAttributeError, - errs): - continue - - # Ensure that overlay and preserve file semantics are handled - # as expected when conflicts only exist in packages that are - # being removed. - for key, oactions in six.iteritems(old): - self.__progtrack.plan_add_progress( - self.__progtrack.PLAN_ACTION_CONFLICT) - - if len(oactions) < 2: - continue - - if key in new: - # Already processed. 
- continue - - if any(a[0].name != "file" for a in oactions): - continue - - entry = oactions[0][0] - if not entry.refcountable and entry.globally_identical: - if self.__process_conflicts(key, - self.__check_duplicate_actions, - [], oactions, - api_errors.DuplicateActionError, - errs): - continue - - @staticmethod - def _check_actions(nsd): - """Return the keys in the namespace dictionary ('nsd') which - map to actions that conflict with each other.""" - - def noop(*args): - return None - - bad_keys = set() - for ns, key_dict in six.iteritems(nsd): - if type(ns) != int: - type_func = ImagePlan.__check_inconsistent_types - else: - type_func = noop - for key, actions in six.iteritems(key_dict): - if len(actions) == 1: - continue - if type_func(actions, []) is not None: - bad_keys.add(key) - continue - if not actions[0][0].refcountable and \ - actions[0][0].globally_identical: - if ImagePlan.__check_duplicate_actions( - actions, []) is not None: - bad_keys.add(key) - continue - elif actions[0][0].globally_identical and \ - ImagePlan.__check_inconsistent_attrs( - actions, []) is not None: - bad_keys.add(key) - continue - return bad_keys - - def __clear_pkg_plans(self): - """Now that we're done reading the manifests, we can clear them - from the pkgplans.""" - - for p in self.pd.pkg_plans: - p.clear_dest_manifest() - p.clear_origin_manifest() - - def __find_all_conflicts(self): - """Find all instances of conflicting actions. - - There are three categories of conflicting actions. The first - involves the notion of a 'namespace group': a set of action - classes which install into the same namespace. The only example - of this is the set of filesystem actions: file, dir, link, and - hardlink. If more than one action delivers to a given pathname, - all of those actions need to be of the same type. - - The second category involves actions which cannot be delivered - multiple times to the same point in their namespace. For - example, files must be delivered exactly once, as must users, - but directories or symlinks can be delivered multiple times, and - we refcount them. - - The third category involves actions which may be delivered - multiple times, but all of those actions must be identical in - their core attributes. - """ - - # We need to be able to create broken images from the testsuite. - if DebugValues["broken-conflicting-action-handling"]: - self.__clear_pkg_plans() - return - - errs = [] - - pt = self.__progtrack - pt.plan_start(pt.PLAN_ACTION_CONFLICT) - - # Using strings instead of PkgFmri objects in sets allows for - # much faster performance. - new_fmris = set((str(s) for s in self.gen_new_installed_pkgs())) - - # If we're removing all packages, there won't be any conflicts. - if not new_fmris: - pt.plan_done(pt.PLAN_ACTION_CONFLICT) - self.__clear_pkg_plans() - return - - # figure out which installed packages are being removed by - # this operation - old_fmris = set(( - str(s) for s in self.image.gen_installed_pkgs() - )) - gone_fmris = old_fmris - new_fmris - - # figure out which new packages are being touched by this - # operation. - changing_fmris = set([ - str(p.destination_fmri) - for p in self.pd.pkg_plans - if p.destination_fmri - ]) - - # Group action types by namespace groups - kf = operator.attrgetter("namespace_group") - # Unequal types are not comparable in Python 3, therefore - # convert them to the same type 'int' first. 
- def key(a): - kf = a.namespace_group - if kf is None: - return -1 - elif kf == "path": - return 20 - return kf - types = sorted(six.itervalues(pkg.actions.types), key=key) - - namespace_dict = dict( - (ns, list(action_classes)) - for ns, action_classes in itertools.groupby(types, kf) + return False + + def __update_act( + self, + keys, + tgt, + skip_dups, + offset_dict, + action_classes, + sf, + skip_fmris, + fmri_dict, + ): + """Update 'tgt' with action/fmri pairs from the stripped + action cache that are associated with the specified action + 'keys'. + + The 'skip_dups' parameter indicates if we should avoid adding + duplicate action/pfmri pairs into 'tgt'. + + The 'offset_dict' parameter contains a mapping from key to + offsets into the actions.stripped file and the number of lines + to read. + + The 'action_classes' parameter contains the list of action types + where one action can conflict with another action. + + The 'sf' parameter is the actions.stripped file from which we + read the actual actions indicated by the offset dictionary + 'offset_dict.' + + The 'skip_fmris' parameter contains a set of strings + representing the packages which we should not process actions + for. + + The 'fmri_dict' parameter is a cache of previously built PkgFmri + objects which is used so the same string isn't translated into + the same PkgFmri object multiple times.""" + + for key in keys: + offsets = [] + for klass in action_classes: + offset = offset_dict.get((klass.name, key), None) + if offset is not None: + offsets.append(offset) + + for offset, cnt in offsets: + sf.seek(offset) + pns = None + i = 0 + while 1: + # sf is reading in binary mode + line = misc.force_str(sf.readline()) + i += 1 + if i > cnt: + break + line = line.rstrip() + if line == "": + break + fmristr, actstr = line.split(None, 1) + if fmristr in skip_fmris: + continue + act = pkg.actions.fromstr(actstr) + if act.attrs[act.key_attr] != key: + raise api_errors.InvalidPackageErrors( + [ + "{} has invalid manifest " + "line:".format(fmristr), + " '{}'".format(actstr), + " '{}' vs. '{}'".format( + act.attrs[act.key_attr], key + ), + ] + ) + assert pns is None or act.namespace_group == pns + pns = act.namespace_group + + try: + pfmri = fmri_dict[fmristr] + except KeyError: + pfmri = pkg.fmri.PkgFmri(fmristr) + fmri_dict[fmristr] = pfmri + if skip_dups and self.__act_dup_check( + tgt, key, actstr, fmristr + ): + continue + tgt.setdefault(key, []).append((act, pfmri)) + + def __fast_check(self, new, old, ns): + """Check whether actions being added and removed are + sufficiently similar that further conflict checking on those + actions isn't needed. + + The 'new' parameter is a dictionary mapping keys to the incoming + actions with that as a key. The incoming actions are actions + delivered by packages which are being installed or updated to. + + The 'old' parameter is a dictionary mapping keys to the outgoing + actions with that as a key. The outgoing actions are actions + delivered by packages which are being removed or updated from. + + The 'ns' parameter is the action namespace for the actions in + 'new' and 'old'.""" + + # .keys() is being used because we're removing keys from the + # dictionary as we go. + for key in list(new.keys()): + actions = new[key] + assert len(actions) > 0 + oactions = old.get(key, []) + # If new actions are being installed, then we need to do + # the full conflict checking. 
+ if not oactions: + continue + + unmatched_old_actions = set(range(0, len(oactions))) + + # If the action isn't refcountable and there's more than + # one action, that's an error so we let + # __check_conflicts handle it. + entry = actions[0][0] + if ( + not entry.refcountable + and entry.globally_identical + and len(actions) > 1 + ): + continue + + # Check that each incoming action has a match in the + # outgoing actions. + next_key = False + for act, pfmri in actions: + matched = False + aname = act.name + aattrs = act.attrs + # Compare this action with each outgoing action. + for i, (oact, opfmri) in enumerate(oactions): + if aname != oact.name: + continue + # Check whether all attributes which + # need to be unique are identical for + # these two actions. + oattrs = oact.attrs + if all( + ( + aattrs.get(a) == oattrs.get(a) + for a in act.unique_attrs + ) + ): + matched = True + break + + # If this action didn't have a match in the old + # action, then this key needs full conflict + # checking so move on to the next key. + if not matched: + next_key = True + break + unmatched_old_actions.discard(i) + if next_key: + continue + + # Check that each outgoing action has a match in the + # incoming actions. + for i, (oact, opfmri) in enumerate(oactions): + if i not in unmatched_old_actions: + continue + matched = False + for act, pfmri in actions: + if act.name != oact.name: + continue + if all( + ( + act.attrs.get(a) == oact.attrs.get(a) + for a in act.unique_attrs + ) + ): + matched = True + break + if not matched: + next_key = True + break + unmatched_old_actions.discard(i) + if next_key or unmatched_old_actions: + continue + # We know that each incoming action matches at least one + # outgoing action and each outgoing action matches at + # least one incoming action, so no further conflict + # checking is needed. + del new[key] + del old[key] + + # .keys() is being used because we're removing keys from the + # dictionary as we go. + for key in list(old.keys()): + # If actions that aren't in conflict are being removed, + # then nothing more needs to be done. + if key not in new: + del old[key] + + def __check_conflicts(self, new, old, action_classes, ns, errs): + """Check all the newly installed actions for conflicts with + existing actions.""" + + for key, actions in six.iteritems(new): + oactions = old.get(key, []) + + self.__progtrack.plan_add_progress( + self.__progtrack.PLAN_ACTION_CONFLICT + ) + + if len(actions) == 1 and len(oactions) < 2: + continue + + # Actions delivering to the same point in a + # namespace group's namespace should have the + # same type. + if type(ns) != int: + if self.__process_conflicts( + key, + self.__check_inconsistent_types, + actions, + oactions, + api_errors.InconsistentActionTypeError, + errs, + ): + continue + + # By virtue of the above check, all actions at + # this point in this namespace are the same. + assert len(set(a[0].name for a in actions)) <= 1 + assert len(set(a[0].name for a in oactions)) <= 1 + + # Multiple non-refcountable actions delivered to + # the same name is an error. + entry = actions[0][0] + if not entry.refcountable and entry.globally_identical: + if self.__process_conflicts( + key, + self.__check_duplicate_actions, + actions, + oactions, + api_errors.DuplicateActionError, + errs, + ): + continue + + # Multiple refcountable but globally unique + # actions delivered to the same name must be + # identical. 
+ elif entry.globally_identical: + if self.__process_conflicts( + key, + self.__check_inconsistent_attrs, + actions, + oactions, + api_errors.InconsistentActionAttributeError, + errs, + ): + continue + + # Ensure that overlay and preserve file semantics are handled + # as expected when conflicts only exist in packages that are + # being removed. + for key, oactions in six.iteritems(old): + self.__progtrack.plan_add_progress( + self.__progtrack.PLAN_ACTION_CONFLICT + ) + + if len(oactions) < 2: + continue + + if key in new: + # Already processed. + continue + + if any(a[0].name != "file" for a in oactions): + continue + + entry = oactions[0][0] + if not entry.refcountable and entry.globally_identical: + if self.__process_conflicts( + key, + self.__check_duplicate_actions, + [], + oactions, + api_errors.DuplicateActionError, + errs, + ): + continue + + @staticmethod + def _check_actions(nsd): + """Return the keys in the namespace dictionary ('nsd') which + map to actions that conflict with each other.""" + + def noop(*args): + return None + + bad_keys = set() + for ns, key_dict in six.iteritems(nsd): + if type(ns) != int: + type_func = ImagePlan.__check_inconsistent_types + else: + type_func = noop + for key, actions in six.iteritems(key_dict): + if len(actions) == 1: + continue + if type_func(actions, []) is not None: + bad_keys.add(key) + continue + if ( + not actions[0][0].refcountable + and actions[0][0].globally_identical + ): + if ( + ImagePlan.__check_duplicate_actions(actions, []) + is not None + ): + bad_keys.add(key) + continue + elif ( + actions[0][0].globally_identical + and ImagePlan.__check_inconsistent_attrs(actions, []) + is not None + ): + bad_keys.add(key) + continue + return bad_keys + + def __clear_pkg_plans(self): + """Now that we're done reading the manifests, we can clear them + from the pkgplans.""" + + for p in self.pd.pkg_plans: + p.clear_dest_manifest() + p.clear_origin_manifest() + + def __find_all_conflicts(self): + """Find all instances of conflicting actions. + + There are three categories of conflicting actions. The first + involves the notion of a 'namespace group': a set of action + classes which install into the same namespace. The only example + of this is the set of filesystem actions: file, dir, link, and + hardlink. If more than one action delivers to a given pathname, + all of those actions need to be of the same type. + + The second category involves actions which cannot be delivered + multiple times to the same point in their namespace. For + example, files must be delivered exactly once, as must users, + but directories or symlinks can be delivered multiple times, and + we refcount them. + + The third category involves actions which may be delivered + multiple times, but all of those actions must be identical in + their core attributes. + """ + + # We need to be able to create broken images from the testsuite. + if DebugValues["broken-conflicting-action-handling"]: + self.__clear_pkg_plans() + return + + errs = [] + + pt = self.__progtrack + pt.plan_start(pt.PLAN_ACTION_CONFLICT) + + # Using strings instead of PkgFmri objects in sets allows for + # much faster performance. + new_fmris = set((str(s) for s in self.gen_new_installed_pkgs())) + + # If we're removing all packages, there won't be any conflicts. 
+ if not new_fmris: + pt.plan_done(pt.PLAN_ACTION_CONFLICT) + self.__clear_pkg_plans() + return + + # figure out which installed packages are being removed by + # this operation + old_fmris = set((str(s) for s in self.image.gen_installed_pkgs())) + gone_fmris = old_fmris - new_fmris + + # figure out which new packages are being touched by this + # operation. + changing_fmris = set( + [ + str(p.destination_fmri) + for p in self.pd.pkg_plans + if p.destination_fmri + ] + ) + + # Group action types by namespace groups + kf = operator.attrgetter("namespace_group") + + # Unequal types are not comparable in Python 3, therefore + # convert them to the same type 'int' first. + def key(a): + kf = a.namespace_group + if kf is None: + return -1 + elif kf == "path": + return 20 + return kf + + types = sorted(six.itervalues(pkg.actions.types), key=key) + + namespace_dict = dict( + (ns, list(action_classes)) + for ns, action_classes in itertools.groupby(types, kf) + ) + + pt.plan_add_progress(pt.PLAN_ACTION_CONFLICT) + # Load information about the actions currently on the system. + offset_dict = self.image._load_actdict(self.__progtrack) + sf = self.image._get_stripped_actions_file() + + conflict_clean_image = self.image._load_conflicting_keys() == set() + + fmri_dict = weakref.WeakValueDictionary() + # Iterate over action types in namespace groups first; our first + # check should be for action type consistency. + for ns, action_classes in six.iteritems(namespace_dict): + pt.plan_add_progress(pt.PLAN_ACTION_CONFLICT) + # There's no sense in checking actions which have no + # limits + if all(not c.globally_identical for c in action_classes): + continue + + # The 'new' dict contains information about the system + # as it will be. We start by accumulating actions from + # the manifests of the packages being installed. + new = self.__seed( + self.gen_only_new_installed_actions_bytype, + action_classes, + self.__new_excludes, + ) + + # The 'old' dict contains information about the system + # as it is now. We start by accumulating actions from + # the manifests of the packages being removed. + old = self.__seed( + self.gen_outgoing_actions_bytype, + action_classes, + self.__old_excludes, + ) + + if conflict_clean_image: + self.__fast_check(new, old, ns) + + with contextlib.closing( + mmap.mmap(sf.fileno(), 0, access=mmap.ACCESS_READ) + ) as msf: + # Skip file header. + msf.readline() + msf.readline() + + # Update 'old' with all actions from the action + # cache which could conflict with the new + # actions being installed, or with actions + # already installed, but not getting removed. + keys = set( + itertools.chain(six.iterkeys(new), six.iterkeys(old)) + ) + self.__update_act( + keys, + old, + False, + offset_dict, + action_classes, + msf, + gone_fmris, + fmri_dict, ) - pt.plan_add_progress(pt.PLAN_ACTION_CONFLICT) - # Load information about the actions currently on the system. - offset_dict = self.image._load_actdict(self.__progtrack) - sf = self.image._get_stripped_actions_file() - - conflict_clean_image = \ - self.image._load_conflicting_keys() == set() - - fmri_dict = weakref.WeakValueDictionary() - # Iterate over action types in namespace groups first; our first - # check should be for action type consistency. 
- for ns, action_classes in six.iteritems(namespace_dict): - pt.plan_add_progress(pt.PLAN_ACTION_CONFLICT) - # There's no sense in checking actions which have no - # limits - if all(not c.globally_identical - for c in action_classes): - continue + # Now update 'new' with all actions from the + # action cache which are staying on the system, + # and could conflict with the actions being + # installed. + keys = set(six.iterkeys(old)) + self.__update_act( + keys, + new, + True, + offset_dict, + action_classes, + msf, + gone_fmris | changing_fmris, + fmri_dict, + ) - # The 'new' dict contains information about the system - # as it will be. We start by accumulating actions from - # the manifests of the packages being installed. - new = self.__seed( - self.gen_only_new_installed_actions_bytype, - action_classes, self.__new_excludes) - - # The 'old' dict contains information about the system - # as it is now. We start by accumulating actions from - # the manifests of the packages being removed. - old = self.__seed(self.gen_outgoing_actions_bytype, - action_classes, self.__old_excludes) - - if conflict_clean_image: - self.__fast_check(new, old, ns) - - with contextlib.closing(mmap.mmap(sf.fileno(), 0, - access=mmap.ACCESS_READ)) as msf: - # Skip file header. - msf.readline() - msf.readline() - - # Update 'old' with all actions from the action - # cache which could conflict with the new - # actions being installed, or with actions - # already installed, but not getting removed. - keys = set(itertools.chain(six.iterkeys(new), - six.iterkeys(old))) - self.__update_act(keys, old, False, - offset_dict, action_classes, msf, - gone_fmris, fmri_dict) - - # Now update 'new' with all actions from the - # action cache which are staying on the system, - # and could conflict with the actions being - # installed. - keys = set(six.iterkeys(old)) - self.__update_act(keys, new, True, - offset_dict, action_classes, msf, - gone_fmris | changing_fmris, fmri_dict) - - self.__check_conflicts(new, old, action_classes, ns, - errs) - - del fmri_dict - self.__clear_pkg_plans() - sf.close() - self.__evaluate_fixups() - pt.plan_done(pt.PLAN_ACTION_CONFLICT) - - if errs: - raise api_errors.ConflictingActionErrors(errs) - - @staticmethod - def default_keyfunc(name, act): - """This is the default function used by get_actions when - the caller provides no key.""" - - attr_name = pkg.actions.types[name].key_attr - return act.attrs[attr_name] - - @staticmethod - def hardlink_keyfunc(name, act): - """Keyfunc used in evaluate when calling get_actions - for hardlinks.""" - - return act.get_target_path() - - def get_actions(self, name, key=None): - """Return a dictionary of actions of the type given by 'name' - describing the target image. If 'key' is given and not None, - the dictionary's key will be the name of the action type's key - attribute. Otherwise, it's a callable taking an action as an - argument which returns the key. 
This dictionary is cached for - quick future lookups.""" - if key is None: - key = self.default_keyfunc - - if (name, key) in self.__cached_actions: - return self.__cached_actions[(name, key)] - - d = {} - for act, pfmri in self.gen_new_installed_actions_bytype(name): - t = key(name, act) - d.setdefault(t, []).append(act) - self.__cached_actions[(name, key)] = d - return self.__cached_actions[(name, key)] - - def __get_manifest(self, pfmri, intent, ignore_excludes=False): - """Return manifest for pfmri""" - if pfmri: - return self.image.get_manifest(pfmri, - ignore_excludes=ignore_excludes or - self.pd._varcets_change, - intent=intent) - else: - return manifest.NullFactoredManifest - - def __create_intent(self, old_fmri, new_fmri, enabled_publishers): - """Return intent strings (or None). Given a pair - of fmris describing a package operation, this - routine returns intent strings to be passed to - originating publisher describing manifest - operations. We never send publisher info to - prevent cross-publisher leakage of info.""" - - if self.__noexecute: - return None, None - - __match_intent = dict() - __match_intent.update(self.__match_inst) - __match_intent.update(self.__match_rm) - __match_intent.update(self.__match_update) - - if new_fmri: - reference = __match_intent.get(new_fmri, None) - # don't leak prev. version info across publishers - if old_fmri: - if old_fmri.get_publisher() != \ - new_fmri.get_publisher(): - old_fmri = "unknown" - else: - old_fmri = \ - old_fmri.get_fmri(anarchy=True) - # don't send pub - new_fmri = new_fmri.get_fmri(anarchy=True) + self.__check_conflicts(new, old, action_classes, ns, errs) + + del fmri_dict + self.__clear_pkg_plans() + sf.close() + self.__evaluate_fixups() + pt.plan_done(pt.PLAN_ACTION_CONFLICT) + + if errs: + raise api_errors.ConflictingActionErrors(errs) + + @staticmethod + def default_keyfunc(name, act): + """This is the default function used by get_actions when + the caller provides no key.""" + + attr_name = pkg.actions.types[name].key_attr + return act.attrs[attr_name] + + @staticmethod + def hardlink_keyfunc(name, act): + """Keyfunc used in evaluate when calling get_actions + for hardlinks.""" + + return act.get_target_path() + + def get_actions(self, name, key=None): + """Return a dictionary of actions of the type given by 'name' + describing the target image. If 'key' is given and not None, + the dictionary's key will be the name of the action type's key + attribute. Otherwise, it's a callable taking an action as an + argument which returns the key. This dictionary is cached for + quick future lookups.""" + if key is None: + key = self.default_keyfunc + + if (name, key) in self.__cached_actions: + return self.__cached_actions[(name, key)] + + d = {} + for act, pfmri in self.gen_new_installed_actions_bytype(name): + t = key(name, act) + d.setdefault(t, []).append(act) + self.__cached_actions[(name, key)] = d + return self.__cached_actions[(name, key)] + + def __get_manifest(self, pfmri, intent, ignore_excludes=False): + """Return manifest for pfmri""" + if pfmri: + return self.image.get_manifest( + pfmri, + ignore_excludes=ignore_excludes or self.pd._varcets_change, + intent=intent, + ) + else: + return manifest.NullFactoredManifest + + def __create_intent(self, old_fmri, new_fmri, enabled_publishers): + """Return intent strings (or None). Given a pair + of fmris describing a package operation, this + routine returns intent strings to be passed to + originating publisher describing manifest + operations. 
We never send publisher info to + prevent cross-publisher leakage of info.""" + + if self.__noexecute: + return None, None + + __match_intent = dict() + __match_intent.update(self.__match_inst) + __match_intent.update(self.__match_rm) + __match_intent.update(self.__match_update) + + if new_fmri: + reference = __match_intent.get(new_fmri, None) + # don't leak prev. version info across publishers + if old_fmri: + if old_fmri.get_publisher() != new_fmri.get_publisher(): + old_fmri = "unknown" else: - reference = __match_intent.get(old_fmri, None) - # don't try to send intent info to disabled publisher - if old_fmri.get_publisher() in enabled_publishers: - # don't send pub - old_fmri = old_fmri.get_fmri(anarchy=True) - else: - old_fmri = None - - info = { - "operation": self.pd._op, - "old_fmri" : old_fmri, - "new_fmri" : new_fmri, - "reference": reference - } - - s = "({0})".format(";".join([ - "{0}={1}".format(key, info[key]) for key in info + old_fmri = old_fmri.get_fmri(anarchy=True) + # don't send pub + new_fmri = new_fmri.get_fmri(anarchy=True) + else: + reference = __match_intent.get(old_fmri, None) + # don't try to send intent info to disabled publisher + if old_fmri.get_publisher() in enabled_publishers: + # don't send pub + old_fmri = old_fmri.get_fmri(anarchy=True) + else: + old_fmri = None + + info = { + "operation": self.pd._op, + "old_fmri": old_fmri, + "new_fmri": new_fmri, + "reference": reference, + } + + s = "({0})".format( + ";".join( + [ + "{0}={1}".format(key, info[key]) + for key in info if info[key] is not None - ])) - - if new_fmri: - return None, s # only report new on upgrade - elif old_fmri: - return s, None # uninstall w/ enabled pub - else: - return None, None # uninstall w/ disabled pub - - def add_actuator(self, phase, name, value): - """Add an actuator to the plan. - - The actuator name ('reboot-needed', 'restart_fmri', etc.) is - given in 'name', and the fmri string or callable is given in - 'value'. The 'phase' parameter must be one of 'install', - 'remove', or 'update'. - """ - - if phase == "install": - d = self.pd._actuators.install - elif phase == "remove": - d = self.pd._actuators.removal - elif phase == "update": - d = self.pd._actuators.update - - if hasattr(value, "__call__"): - d[name] = value + ] + ) + ) + + if new_fmri: + return None, s # only report new on upgrade + elif old_fmri: + return s, None # uninstall w/ enabled pub + else: + return None, None # uninstall w/ disabled pub + + def add_actuator(self, phase, name, value): + """Add an actuator to the plan. + + The actuator name ('reboot-needed', 'restart_fmri', etc.) is + given in 'name', and the fmri string or callable is given in + 'value'. The 'phase' parameter must be one of 'install', + 'remove', or 'update'. + """ + + if phase == "install": + d = self.pd._actuators.install + elif phase == "remove": + d = self.pd._actuators.removal + elif phase == "update": + d = self.pd._actuators.update + + if hasattr(value, "__call__"): + d[name] = value + else: + d.setdefault(name, []).append(value) + + def __evaluate_pkg_preserved_files(self): + """Private helper function that determines which preserved files + have changed in ImagePlan and how.""" + + assert self.state >= plandesc.MERGED_OK + + pd = self.pd + + # Track movement of preserved ("editable") files for plan + # summary and cache management. 
+ moved = [] + removed = [] + installed = [] + updated = [] + + # __merge_actions() adds the 'save_file' attribute to src + # actions that are being moved somewhere else and to dest + # actions that will be restored from a src action. This only + # happens when at least one of the files involved has a + # 'preserve' attribute, so it's safe to treat either as a + # 'preserved' ("editable") file. + + # The removal_actions are processed first since we'll determine + # how to transform them while processing the install and update + # actions based on the destination file state. + for ap in pd.removal_actions: + src = ap.src + if src.name != "file": + continue + if not ( + "preserve" in src.attrs + or "save_file" in src.attrs + or "overlay" in src.attrs + ): + # Removed action has to be a preserved file or a + # source of a restore. + continue + if "elfhash" in src.attrs: + # Ignore erroneously tagged files. + continue + + if src.attrs.get("preserve") in ("abandon", "install-only"): + # these files are never removed. + continue + + entry = [src.attrs["path"]] + save_file = src.attrs.get("save_file") + if save_file: + entry.append(save_file[0]) + entry.append(src) + removed.append(entry) + + for ap in itertools.chain(pd.install_actions, pd.update_actions): + orig = ap.src + dest = ap.dst + if dest.name != "file": + continue + + dpres_type = dest.attrs.get("preserve") + if not ( + ( + orig + and ( + "preserve" in orig.attrs + or "save_file" in orig.attrs + or "overlay" in orig.attrs + ) + ) + or ( + dpres_type + or "save_file" in dest.attrs + or "overlay" in dest.attrs + ) + ): + # At least one of the actions has to be a + # preserved file or a target of a restore. + continue + if "elfhash" in dest.attrs: + # Ignore erroneously tagged files. + continue + + tpath = dest.attrs["path"] + entry = [tpath] + save_file = dest.attrs.get("save_file") + if save_file: + tcache_name = save_file[0] + for ridx, rentry in enumerate(removed): + if len(rentry) == 1: + continue + + rpath, rcache_name, rorig = rentry + if rcache_name == tcache_name: + # If the cache name for this new + # file matches one of those for + # a removed file, the removed + # file will be renamed to this + # action's path before the + # action is processed. + del removed[ridx] + save_file = rpath + orig = rorig + break else: - d.setdefault(name, []).append(value) - - def __evaluate_pkg_preserved_files(self): - """Private helper function that determines which preserved files - have changed in ImagePlan and how.""" - - assert self.state >= plandesc.MERGED_OK - - pd = self.pd - - # Track movement of preserved ("editable") files for plan - # summary and cache management. - moved = [] - removed = [] - installed = [] - updated = [] - - # __merge_actions() adds the 'save_file' attribute to src - # actions that are being moved somewhere else and to dest - # actions that will be restored from a src action. This only - # happens when at least one of the files involved has a - # 'preserve' attribute, so it's safe to treat either as a - # 'preserved' ("editable") file. - - # The removal_actions are processed first since we'll determine - # how to transform them while processing the install and update - # actions based on the destination file state. - for ap in pd.removal_actions: - src = ap.src - if src.name != "file": - continue - if not ("preserve" in src.attrs or - "save_file" in src.attrs or - "overlay" in src.attrs): - # Removed action has to be a preserved file or a - # source of a restore. 
- continue - if "elfhash" in src.attrs: - # Ignore erroneously tagged files. - continue - - if src.attrs.get("preserve") in ("abandon", - "install-only"): - # these files are never removed. - continue - - entry = [src.attrs["path"]] - save_file = src.attrs.get("save_file") - if save_file: - entry.append(save_file[0]) - entry.append(src) - removed.append(entry) - - for ap in itertools.chain(pd.install_actions, - pd.update_actions): - orig = ap.src - dest = ap.dst - if dest.name != "file": - continue + save_file = None - dpres_type = dest.attrs.get("preserve") - if not ((orig and ("preserve" in orig.attrs or - "save_file" in orig.attrs or - "overlay" in orig.attrs)) or - (dpres_type or - "save_file" in dest.attrs or - "overlay" in dest.attrs)): - # At least one of the actions has to be a - # preserved file or a target of a restore. - continue - if "elfhash" in dest.attrs: - # Ignore erroneously tagged files. - continue + if not orig and dpres_type == "install-only": + # For install-only, we can rely on + # _check_preserve. + try: + if not dest._check_preserve(orig, ap.p): + installed.append(entry) + except EnvironmentError as e: + if e.errno != errno.EACCES: + raise + continue + elif not orig: + # We can't rely on _check_preserve for this case + # as there's no existing on-disk file at the + # destination path yet. + if dpres_type != "legacy" and dpres_type != "abandon": + # 'abandon' actions are never delivered; + # 'legacy' actions are only delivered if + # we're updating something already + # installed or moving an existing file. + installed.append(entry) + continue + elif orig.name != "file": + # File is being replaced with another object + # type. + updated.append(entry) + continue + + # The order of these checks is significant in + # determining how a preserved file changed! + # + # First, check for on-disk content changes. + opath = orig.get_installed_path(self.image.get_root()) + try: + pres_type = dest._check_preserve(orig, ap.p, orig_path=opath) + except EnvironmentError as e: + if e.errno == errno.EACCES: + continue + else: + raise + + final_path = dest.get_installed_path(self.image.get_root()) + + # If a removed action is going to be restored to + # complete the operation, show the removed action path + # as the source for the move omitting the steps + # in-between. For example: + # moved: testme -> newme + # moved: newme -> newme.legacy + # installed: newme + # ...becomes: + # moved: testme -> newme.legacy + # installed: newme + if save_file: + mpath = save_file + else: + mpath = tpath + + if pres_type == "abandon": + # newly-tagged preserve=abandon files never + # delivered. + continue + elif pres_type == "renameold": + moved.append([mpath, tpath + ".old"]) + installed.append(entry) + continue + elif pres_type == "renameold.update": + moved.append([mpath, tpath + ".update"]) + installed.append(entry) + continue + elif pres_type == "legacy": + if orig.attrs.get("preserve") == "legacy": + updated.append(entry) + continue + # Move only happens on preserve transition and + # only if original already exists. + if os.path.isfile(opath): + moved.append([mpath, tpath + ".legacy"]) + installed.append(entry) + continue + elif pres_type == True and save_file: + # If the source and destination path are the + # same, the content won't be updated. + if mpath != tpath: + # New content ignored in favour of old. + moved.append([mpath, tpath]) + continue + + # Next, if on-disk file will be preserved and some other + # unique_attr is changing (such as mode, etc.) mark the + # file as "updated". 
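The preserve handling above fans each file out into the moved / installed / updated summary lists depending on the pres_type that _check_preserve() reports. A toy classifier showing the main branch outcomes for a single file (entries are simplified to bare paths; the real code also carries save_file data and handles 'legacy', 'abandon' and install-only cases):

def classify_preserved(pres_type, tpath, mpath, save_file=False):
    """Toy sketch of the pres_type branches: returns the
    (moved, installed, updated) lists for one preserved file."""
    moved, installed, updated = [], [], []
    if pres_type == "renameold":
        moved.append([mpath, tpath + ".old"])
        installed.append([tpath])
    elif pres_type == "renameold.update":
        moved.append([mpath, tpath + ".update"])
        installed.append([tpath])
    elif pres_type == "renamenew":
        if save_file:
            moved.append([mpath, tpath])
        installed.append([tpath + ".new"])   # new content set aside
    elif pres_type is None:
        updated.append([tpath])              # content or attrs changed
    return moved, installed, updated

# An edited config file whose new version is written alongside it:
print(classify_preserved("renamenew", "etc/motd", "etc/motd"))
# ([], [['etc/motd.new']], [])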
+ if pres_type == True and ImagePlan.__find_inconsistent_attrs( + ((orig,), (dest,)), ignore=("path", "preserve") + ): + # For 'install-only', we can only update for + # inconsistent attributes if the file already + # exists. + if dpres_type != "install-only" or os.path.isfile(final_path): + updated.append(entry) + continue + + # For remaining cases, what happens is based on the + # result of _check_preserve(). + if pres_type == "renamenew": + if save_file: + moved.append([mpath, tpath]) + # Delivered content changed. + installed.append([tpath + ".new"]) + elif pres_type is None: + # Delivered content or unique_attrs changed. + updated.append(entry) + elif pres_type == False: + if save_file: + moved.append([mpath, tpath]) + continue + + if not os.path.isfile(final_path): + # File is missing or of wrong type. + installed.append(entry) + continue + + # If a file is moving between packages, it will + # appear as an update, but may not have not have + # different content or unique_attrs. Check to + # see if it does. + if ImagePlan.__find_inconsistent_attrs( + ((orig,), (dest,)), ignore=("path", "preserve") + ): + # Different unique_attrs. + updated.append(entry) + continue - tpath = dest.attrs["path"] - entry = [tpath] - save_file = dest.attrs.get("save_file") - if save_file: - tcache_name = save_file[0] - for (ridx, rentry) in enumerate(removed): - if len(rentry) == 1: - continue - - rpath, rcache_name, rorig = rentry - if rcache_name == tcache_name: - # If the cache name for this new - # file matches one of those for - # a removed file, the removed - # file will be renamed to this - # action's path before the - # action is processed. - del removed[ridx] - save_file = rpath - orig = rorig - break - else: - save_file = None - - if not orig and dpres_type == "install-only": - # For install-only, we can rely on - # _check_preserve. - try: - if not dest._check_preserve(orig, ap.p): - installed.append(entry) - except EnvironmentError as e: - if e.errno != errno.EACCES: - raise - continue - elif not orig: - # We can't rely on _check_preserve for this case - # as there's no existing on-disk file at the - # destination path yet. - if (dpres_type != "legacy" and - dpres_type != "abandon"): - # 'abandon' actions are never delivered; - # 'legacy' actions are only delivered if - # we're updating something already - # installed or moving an existing file. - installed.append(entry) - continue - elif orig.name != "file": - # File is being replaced with another object - # type. - updated.append(entry) - continue + attr, shash, ohash, hfunc = digest.get_common_preferred_hash( + dest, orig + ) + if shash != ohash: + # Delivered content changed. + updated.append(entry) + continue + + # Pre-sort results for consumers. + installed.sort() + moved.sort() + removed.sort() + updated.sort() + + self.pd._preserved = { + "installed": installed, + "moved": moved, + "removed": removed, + "updated": updated, + } + + def __evaluate_pkg_downloads(self): + """Private helper function that determines package data to be + downloaded and updates the plan accordingly.""" + + assert self.state >= plandesc.MERGED_OK + + pd = self.pd + + for p in pd.pkg_plans: + cpbytes, pbytes = p.get_bytes_added() + if p.destination_fmri: + mpath = self.image.get_manifest_path(p.destination_fmri) + try: + # Manifest data is essentially stored + # three times (original, cache, catalog). + # For now, include this in cbytes_added + # since that's closest to where the + # download cache is stored. 
+ pd._cbytes_added += os.stat(mpath).st_size * 3 + except EnvironmentError as e: + raise api_errors._convert_error(e) + pd._cbytes_added += cpbytes + pd._bytes_added += pbytes + + # Include state directory in cbytes_added for now since it's + # closest to where the download cache is stored. (Twice the + # amount is used because image state update involves using + # a complete copy of existing state.) + pd._cbytes_added += misc.get_dir_size(self.image._statedir) * 2 + + # Our slop factor is 25%; overestimating is safer than under- + # estimating. This attempts to approximate how much overhead + # the filesystem will impose on the operation. Empirical + # testing suggests that overhead can vary wildly depending on + # average file size, fragmentation, zfs metadata overhead, etc. + # For an install of a package such as solaris-small-server into + # an image, a 12% difference between actual size and installed + # size was found, so this seems safe enough. (And helps account + # for any bootarchives, fs overhead, etc.) + pd._cbytes_added *= 1.25 + pd._bytes_added *= 1.25 + + # XXX For now, include cbytes_added in bytes_added total; in the + # future, this should only happen if they share the same + # filesystem. + pd._bytes_added += pd._cbytes_added + self.__update_avail_space() + + # Verify that there is enough space for the change. + if self.pd._bytes_added > self.pd._bytes_avail: + # During a dry run log a warning and continue to run the + # solver to produce any further warnings/errors. + if self.__noexecute: + msg = api_errors.ImageInsufficentSpace( + self.pd._bytes_added, + self.pd._bytes_avail, + _("Root filesystem"), + ) + timestamp = misc.time_to_timestamp(time.time()) + self.pd.add_item_message( + "warning", timestamp, MSG_WARNING, _(msg) + ) + else: + raise api_errors.ImageInsufficentSpace( + self.pd._bytes_added, + self.pd._bytes_avail, + _("Root filesystem"), + ) - # The order of these checks is significant in - # determining how a preserved file changed! - # - # First, check for on-disk content changes. - opath = orig.get_installed_path(self.image.get_root()) - try: - pres_type = dest._check_preserve(orig, ap.p, - orig_path=opath) - except EnvironmentError as e: - if e.errno == errno.EACCES: - continue - else: - raise - - final_path = dest.get_installed_path( - self.image.get_root()) - - # If a removed action is going to be restored to - # complete the operation, show the removed action path - # as the source for the move omitting the steps - # in-between. For example: - # moved: testme -> newme - # moved: newme -> newme.legacy - # installed: newme - # ...becomes: - # moved: testme -> newme.legacy - # installed: newme - if save_file: - mpath = save_file - else: - mpath = tpath - - if pres_type == "abandon": - # newly-tagged preserve=abandon files never - # delivered. - continue - elif pres_type == "renameold": - moved.append([mpath, tpath + ".old"]) - installed.append(entry) - continue - elif pres_type == "renameold.update": - moved.append([mpath, tpath + ".update"]) - installed.append(entry) - continue - elif pres_type == "legacy": - if orig.attrs.get("preserve") == "legacy": - updated.append(entry) - continue - # Move only happens on preserve transition and - # only if original already exists. - if os.path.isfile(opath): - moved.append([mpath, tpath + ".legacy"]) - installed.append(entry) - continue - elif pres_type == True and save_file: - # If the source and destination path are the - # same, the content won't be updated. 
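The space estimate in __evaluate_pkg_downloads is simple enough to work through by hand. A rough illustration with made-up sizes (a 40 KiB manifest, 2 MiB of image state, 100 MiB of payload); the real values come from os.stat(), misc.get_dir_size() and the per-plan byte counts:

manifest_size = 40 * 1024          # one new manifest on disk
statedir_size = 2 * 1024 * 1024    # current image state directory
payload_bytes = 100 * 1024 * 1024  # package data to install

cbytes_added = manifest_size * 3   # original + cache + catalog copies
cbytes_added += statedir_size * 2  # state update works from a full copy
bytes_added = payload_bytes

# 25% slop for filesystem overhead, then fold cache bytes into the total.
cbytes_added *= 1.25
bytes_added *= 1.25
bytes_added += cbytes_added

print(round(bytes_added / (1024 * 1024), 1), "MiB needed")  # 130.1 MiB needed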
- if mpath != tpath: - # New content ignored in favour of old. - moved.append([mpath, tpath]) - continue + def evaluate(self): + """Given already determined fmri changes, + build pkg plans and figure out exact impact of + proposed changes""" + + assert self.pd.state == plandesc.EVALUATED_PKGS, self + + if self.pd._image_lm != self.image.get_last_modified(string=True): + # State has been modified since plan was created; this + # plan is no longer valid. + raise api_errors.InvalidPlanError() + + self.__evaluate_pkg_plans() + self.__merge_actions() + self.__compile_release_notes() + + if not self.pd._li_pkg_updates and self.pd.pkg_plans: + # oops. the caller requested no package updates and + # we couldn't satisfy that request. + fmri_updates = [ + (p.origin_fmri, p.destination_fmri) for p in self.pd.pkg_plans + ] + raise api_errors.PlanCreationException( + pkg_updates_required=fmri_updates + ) + + # Check for files which have been elided due to image + # exclusions, and honour the image's exclusion policy. + ix_policy = self.image.get_property(imageconfig.EXCLUDE_POLICY) + if self.pd.elided_actions and ix_policy != "ignore": + elided = [] + for o, n in self.pd.get_elided_actions(): + if o is None: + elided.extend(n.attrlist("path")) + if ix_policy == "warn": + timestamp = misc.time_to_timestamp(time.time()) + self.pd.add_item_message( + "warning", + timestamp, + MSG_WARNING, + self.__make_excl_msg(elided), + ) + elif ix_policy == "reject": + raise api_errors.PlanExclusionError(paths=elided) + + # These must be done after action merging. + self.__evaluate_pkg_preserved_files() + self.__evaluate_pkg_downloads() + # If there are invalid mediators then message about it + # for the no execute (-n) or zones case. If an update + # in the global zone an exception will be raised later. + if self.invalid_meds: + if self.__noexecute or self.image.is_zone(): + medmsg = self.__make_med_msg() + timestamp = misc.time_to_timestamp(time.time()) + self.pd.add_item_message( + "warning", timestamp, MSG_WARNING, medmsg + ) - # Next, if on-disk file will be preserved and some other - # unique_attr is changing (such as mode, etc.) mark the - # file as "updated". - if (pres_type == True and - ImagePlan.__find_inconsistent_attrs( - ((orig,), (dest,)), - ignore=("path", "preserve"))): - - # For 'install-only', we can only update for - # inconsistent attributes if the file already - # exists. - if (dpres_type != "install-only" or - os.path.isfile(final_path)): - updated.append(entry) - continue + def __update_avail_space(self): + """Update amount of available space on FS""" + + self.pd._cbytes_avail = misc.spaceavail(self.image.write_cache_path) + + self.pd._bytes_avail = misc.spaceavail(self.image.root) + # if we don't have a full image yet + if self.pd._cbytes_avail < 0: + self.pd._cbytes_avail = self.pd._bytes_avail + + def __include_note(self, installed_dict, act, containing_fmri): + """Decide if a release note should be shown/included. If + feature/pkg/self is fmri, fmri is containing package; + if version is then 0, this is note is displayed on initial + install only. Otherwise, if version earlier than specified + fmri is present in code, display release note.""" + + for fmristr in act.attrlist("release-note"): + try: + pfmri = pkg.fmri.PkgFmri(fmristr) + except pkg.fmri.FmriError: + continue # skip malformed fmris + # any special handling here? 
+ if pfmri.pkg_name == "feature/pkg/self": + if ( + str(pfmri.version) == "0,5.11" + and containing_fmri.pkg_name not in installed_dict + ): + return True + else: + pfmri.pkg_name = containing_fmri.pkg_name + if pfmri.pkg_name not in installed_dict: + continue + installed_fmri = installed_dict[pfmri.pkg_name] + # if neither is successor they are equal + if pfmri.is_successor(installed_fmri): + return True + return False + + def __get_note_text(self, act, pfmri): + """Retrieve text for release note from repo + + If there are UTF-8 encoding errors in the text replace them + so that we still have a note to show rather than failing + the entire operation. The copy saved on disk is left as is.""" + try: + pub = self.image.get_publisher(pfmri.publisher) + hash_attr, hash_val, hash_func = digest.get_least_preferred_hash( + act + ) + return self.image.transport.get_content( + pub, hash_val, fmri=pfmri, hash_func=hash_func, errors="replace" + ) + finally: + self.image.cleanup_downloads() + + def __compile_release_notes(self): + """Figure out what release notes need to be displayed""" + release_notes = self.pd._actuators.get_release_note_info() + must_display = False + notes = [] + + if release_notes: + installed_dict = ImagePlan.__fmris2dict( + self.image.gen_installed_pkgs() + ) + for act, pfmri in release_notes: + if self.__include_note(installed_dict, act, pfmri): + if act.attrs.get("must-display", "false") == "true": + must_display = True + for l in self.__get_note_text(act, pfmri).splitlines(): + notes.append(misc.decode(l)) + + self.pd.release_notes = (must_display, notes) + + def __save_release_notes(self): + """Save a copy of the release notes and store the file name""" + if self.pd.release_notes[1]: + # create a file in imgdir/notes + dpath = os.path.join(self.image.imgdir, "notes") + misc.makedirs(dpath) + fd, path = tempfile.mkstemp( + suffix=".txt", dir=dpath, prefix="release-notes-" + ) + tmpfile = os.fdopen(fd, "w") + for note in self.pd.release_notes[1]: + note = misc.force_str(note) + print(note, file=tmpfile) + # make file world readable + os.chmod(path, 0o644) + tmpfile.close() + self.pd.release_notes_name = os.path.basename(path) + + def __evaluate_pkg_plans(self): + """Internal helper function that does the work of converting + fmri changes into pkg plans.""" + + pt = self.__progtrack + # prefetch manifests + prefetch_mfsts = [] # manifest, intents to be prefetched + eval_list = [] # oldfmri, oldintent, newfmri, newintent + # prefetched intents omitted + enabled_publishers = set( + [a.prefix for a in self.image.gen_publishers()] + ) + + # + # XXX this could be improved, or perhaps the "do we have it?" + # logic could be moved into prefetch_manifests, and + # PLAN_FIND_MFST could go away? This can be slow. + # + pt.plan_start(pt.PLAN_FIND_MFST) + for oldfmri, newfmri in self.pd._fmri_changes: + pt.plan_add_progress(pt.PLAN_FIND_MFST) + old_in, new_in = self.__create_intent( + oldfmri, newfmri, enabled_publishers + ) + if oldfmri: + if not self.image.has_manifest(oldfmri): + prefetch_mfsts.append((oldfmri, old_in)) + old_in = None # so we don't send it twice + if newfmri: + if not self.image.has_manifest(newfmri): + prefetch_mfsts.append((newfmri, new_in)) + new_in = None + eval_list.append((oldfmri, old_in, newfmri, new_in)) + old_in = new_in = None + pt.plan_done(pt.PLAN_FIND_MFST) + + # No longer needed. 
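The release-note decision in __include_note boils down to: a note pinned to feature/pkg/self at version 0 is shown only on initial install of the containing package, otherwise the note is shown when its fmri is newer than what is installed. A toy sketch of that decision, with plain integers standing in for pkg.version.Version and '>' approximating is_successor (all names and values here are illustrative):

def include_note(note_fmris, containing_pkg, installed_versions):
    """note_fmris: (pkg_name, version) pairs from the release-note
    attribute; installed_versions: package name -> installed version."""
    for name, version in note_fmris:
        if name == "feature/pkg/self":
            # Version 0 means: show only on first install of the
            # containing package.
            if version == 0 and containing_pkg not in installed_versions:
                return True
            name = containing_pkg
        if name not in installed_versions:
            continue
        # Show the note when it is tagged for a newer version than
        # the one currently installed.
        if version > installed_versions[name]:
            return True
    return False

# First install of 'web/server': the self/0 note is displayed.
print(include_note([("feature/pkg/self", 0)], "web/server", {}))                 # True
# Installed at version 2, note tagged for version 3: displayed.
print(include_note([("web/server", 3)], "web/server", {"web/server": 2}))        # True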
+ del enabled_publishers + self.__match_rm = {} + self.__match_update = {} + + self.image.transport.prefetch_manifests( + prefetch_mfsts, + ccancel=self.__check_cancel, + progtrack=self.__progtrack, + ) + + # No longer needed. + del prefetch_mfsts + + max_items = len(eval_list) + pt.plan_start(pt.PLAN_PKGPLAN, goal=max_items) + same_excludes = self.__old_excludes == self.__new_excludes + + for oldfmri, old_in, newfmri, new_in in eval_list: + pp = pkgplan.PkgPlan(self.image) + + if oldfmri == newfmri: + # When creating intent, we always prefer to send + # the new intent over old intent (see + # __create_intent), so it's not necessary to + # touch the old manifest in this situation. + m = self.__get_manifest(newfmri, new_in, ignore_excludes=True) + pp.propose(oldfmri, m, newfmri, m) + can_exclude = same_excludes + else: + pp.propose( + oldfmri, + self.__get_manifest(oldfmri, old_in), + newfmri, + self.__get_manifest(newfmri, new_in, ignore_excludes=True), + ) + can_exclude = True + + pp.evaluate( + self.__old_excludes, + self.__new_excludes, + can_exclude=can_exclude, + ) + + self.pd.pkg_plans.append(pp) + pt.plan_add_progress(pt.PLAN_PKGPLAN, nitems=1) + pp = None + + # No longer needed. + del eval_list + pt.plan_done(pt.PLAN_PKGPLAN) + + def __mediate_links(self, mediated_removed_paths): + """Mediate links in the plan--this requires first determining the + possible mediation for each mediator. This is done solely based + on the metadata of the links that are still or will be installed. + Returns a dictionary of the proposed mediations.""" + + # + # If we're not changing mediators, and we're not changing + # variants or facets (which could affect mediators), and we're + # not changing any packages (which could affect mediators), + # then mediators can't be changing so there's nothing to do + # here. + # + if ( + not self.pd._mediators_change + and not self.pd._varcets_change + and not self.pd._fmri_changes + ): + # return the currently configured mediators + return defaultdict(set, self.pd._cfg_mediators) + + prop_mediators = defaultdict(set) + mediated_installed_paths = defaultdict(set) + for a, pfmri in itertools.chain( + self.gen_new_installed_actions_bytype("link"), + self.gen_new_installed_actions_bytype("hardlink"), + ): + mediator = a.attrs.get("mediator") + if not mediator: + # Link is not mediated. + continue + med_ver = a.attrs.get("mediator-version") + if med_ver: + med_ver = pkg.version.Version(med_ver) + med_impl = a.attrs.get("mediator-implementation") + if not (med_ver or med_impl): + # Link mediation is incomplete. + continue + med_priority = a.attrs.get("mediator-priority") + prop_mediators[mediator].add((med_priority, med_ver, med_impl)) + mediated_installed_paths[a.attrs["path"]].add( + (a, pfmri, mediator, med_ver, med_impl) + ) + + # Now select only the "best" mediation for each mediator; + # items() is used here as the dictionary is altered during + # iteration. 
+ cfg_mediators = self.pd._cfg_mediators + changed_mediators = set() + for mediator, values in prop_mediators.items(): + med_ver_source = ( + med_impl_source + ) = med_priority = med_ver = med_impl = med_impl_ver = None + + mediation = self.pd._new_mediators.get(mediator) + cfg_mediation = cfg_mediators.get(mediator) + if mediation: + med_ver = mediation.get("version") + med_ver_source = mediation.get("version-source") + med_impl = mediation.get("implementation") + med_impl_source = mediation.get("implementation-source") + elif mediation is None and cfg_mediation: + # If a reset of mediation was not requested, + # use previously configured mediation as the + # default. + med_ver = cfg_mediation.get("version") + med_ver_source = cfg_mediation.get("version-source") + med_impl = cfg_mediation.get("implementation") + med_impl_source = cfg_mediation.get("implementation-source") + + # Pick first "optimal" version and/or implementation. + for opt_priority, opt_ver, opt_impl in sorted( + values, key=cmp_to_key(med.cmp_mediations) + ): + if med_ver_source == "local": + if opt_ver != med_ver: + # This mediation not allowed + # by local configuration. + continue + if med_impl_source == "local": + if not mediator_impl_matches(opt_impl, med_impl): + # This mediation not allowed + # by local configuration. + continue + + med_source = opt_priority + if not med_source: + # 'source' is equivalent to priority, + # but if no priority was specified, + # treat this as 'system' to indicate + # the mediation component was arbitrarily + # selected. + med_source = "system" + + if med_ver_source != "local": + med_ver = opt_ver + med_ver_source = med_source + if med_impl_source != "local": + med_impl = opt_impl + med_impl_source = med_source + elif med_impl and "@" not in med_impl: + # In the event a versionless + # implementation is set by the + # administrator, the version component + # has to be stored separately for display + # purposes. + impl_ver = med.parse_mediator_implementation(opt_impl)[1] + if impl_ver: + med_impl_ver = impl_ver + break + + if cfg_mediation and ( + med_ver != cfg_mediation.get("version") + or not mediator_impl_matches( + med_impl, cfg_mediation.get("implementation") + ) + ): + # If mediation has changed for a mediator, then + # all links for already installed packages will + # have to be removed if they are for the old + # mediation or repaired (installed) if they are + # for the new mediation. + changed_mediators.add(mediator) + + prop_mediators[mediator] = {} + if med_ver: + prop_mediators[mediator]["version"] = med_ver + if med_ver_source: + prop_mediators[mediator]["version-source"] = med_ver_source + if med_impl: + prop_mediators[mediator]["implementation"] = med_impl + if med_impl_ver: + prop_mediators[mediator][ + "implementation-version" + ] = med_impl_ver + if med_impl_source: + prop_mediators[mediator][ + "implementation-source" + ] = med_impl_source + + # Determine which install and update actions should not be + # executed based on configured and proposed mediations. Also + # transform any install or update actions belonging to a + # changing mediation into removals. + + # This keeps track of which pkgplans need to be trimmed. + act_removals = {} + + # This keeps track of which mediated paths are being delivered + # and which need removal. 
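The "best" mediation loop above walks the candidates in cmp_mediations order and takes the first one that a locally configured ("local") version or implementation lock permits. A minimal sketch of that selection under assumed semantics (the comparator below is a toy ordering, not pkg.mediator.cmp_mediations, and the candidate tuples are simplified to priority and version only):

from functools import cmp_to_key

# Candidate mediations as (priority, version); priority None means the
# delivering packages did not specify one ("system").
candidates = [(None, 8), ("vendor", 11), (None, 17)]

def cmp_mediations(a, b):
    """Toy stand-in: 'site' beats 'vendor' beats unset, then newer
    versions win within the same priority."""
    rank = {"site": 0, "vendor": 1, None: 2}
    if rank[a[0]] != rank[b[0]]:
        return rank[a[0]] - rank[b[0]]
    return b[1] - a[1]

def pick(candidates, locked_version=None):
    """Return the first acceptable mediation, honouring a 'local'
    version lock the way the loop honours med_ver_source."""
    for priority, version in sorted(candidates, key=cmp_to_key(cmp_mediations)):
        if locked_version is not None and version != locked_version:
            continue  # not allowed by local configuration
        return priority or "system", version
    return None

print(pick(candidates))                      # ('vendor', 11)
print(pick(candidates, locked_version=17))   # ('system', 17)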
+ act_mediated_paths = {"installed": {}, "removed": {}} + + for al, ptype in ( + (self.pd.install_actions, "added"), + (self.pd.update_actions, "changed"), + ): + for i, ap in enumerate(al): + if not ap or not ( + ap.dst.name == "link" or ap.dst.name == "hardlink" + ): + continue - # For remaining cases, what happens is based on the - # result of _check_preserve(). - if pres_type == "renamenew": - if save_file: - moved.append([mpath, tpath]) - # Delivered content changed. - installed.append([tpath + ".new"]) - elif pres_type is None: - # Delivered content or unique_attrs changed. - updated.append(entry) - elif pres_type == False: - if save_file: - moved.append([mpath, tpath]) - continue - - if not os.path.isfile(final_path): - # File is missing or of wrong type. - installed.append(entry) - continue - - # If a file is moving between packages, it will - # appear as an update, but may not have not have - # different content or unique_attrs. Check to - # see if it does. - if ImagePlan.__find_inconsistent_attrs( - ((orig,), (dest,)), - ignore=("path", "preserve")): - # Different unique_attrs. - updated.append(entry) - continue - - attr, shash, ohash, hfunc = \ - digest.get_common_preferred_hash(dest, orig) - if shash != ohash: - # Delivered content changed. - updated.append(entry) - continue - - # Pre-sort results for consumers. - installed.sort() - moved.sort() - removed.sort() - updated.sort() - - self.pd._preserved = { - "installed": installed, - "moved": moved, - "removed": removed, - "updated": updated, - } - - def __evaluate_pkg_downloads(self): - """Private helper function that determines package data to be - downloaded and updates the plan accordingly.""" - - assert self.state >= plandesc.MERGED_OK - - pd = self.pd - - for p in pd.pkg_plans: - cpbytes, pbytes = p.get_bytes_added() - if p.destination_fmri: - mpath = self.image.get_manifest_path( - p.destination_fmri) - try: - # Manifest data is essentially stored - # three times (original, cache, catalog). - # For now, include this in cbytes_added - # since that's closest to where the - # download cache is stored. - pd._cbytes_added += \ - os.stat(mpath).st_size * 3 - except EnvironmentError as e: - raise api_errors._convert_error(e) - pd._cbytes_added += cpbytes - pd._bytes_added += pbytes - - # Include state directory in cbytes_added for now since it's - # closest to where the download cache is stored. (Twice the - # amount is used because image state update involves using - # a complete copy of existing state.) - pd._cbytes_added += misc.get_dir_size(self.image._statedir) * 2 - - # Our slop factor is 25%; overestimating is safer than under- - # estimating. This attempts to approximate how much overhead - # the filesystem will impose on the operation. Empirical - # testing suggests that overhead can vary wildly depending on - # average file size, fragmentation, zfs metadata overhead, etc. - # For an install of a package such as solaris-small-server into - # an image, a 12% difference between actual size and installed - # size was found, so this seems safe enough. (And helps account - # for any bootarchives, fs overhead, etc.) - pd._cbytes_added *= 1.25 - pd._bytes_added *= 1.25 - - # XXX For now, include cbytes_added in bytes_added total; in the - # future, this should only happen if they share the same - # filesystem. - pd._bytes_added += pd._cbytes_added - self.__update_avail_space() - - # Verify that there is enough space for the change. 
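The act_mediated_paths bookkeeping introduced here feeds the optimization a little further down: a rejected mediated link is only turned into a removal if no other link is being delivered to the same path. A small sketch of that filter (paths and entries are illustrative; the real values are _ActionPlan objects keyed by ap.dst.attrs["path"]):

# A rejected mediated link is removed only when nothing else is being
# delivered at its path.
act_mediated_paths = {
    "installed": {"usr/bin/python": None},
    "removed": {
        "usr/bin/python": ["link python@3.9"],    # superseded, but replaced
        "usr/bin/python2": ["link python@2.7"],   # superseded, not replaced
    },
}

to_remove = [
    ap
    for path, aps in act_mediated_paths["removed"].items()
    if path not in act_mediated_paths["installed"]
    for ap in aps
]
print(to_remove)   # ['link python@2.7']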
- if self.pd._bytes_added > self.pd._bytes_avail: - # During a dry run log a warning and continue to run the - # solver to produce any further warnings/errors. - if self.__noexecute: - msg = api_errors.ImageInsufficentSpace( - self.pd._bytes_added, - self.pd._bytes_avail, - _("Root filesystem")) - timestamp = misc.time_to_timestamp(time.time()) - self.pd.add_item_message("warning", - timestamp, MSG_WARNING, _(msg)) - else: - raise api_errors.ImageInsufficentSpace( - self.pd._bytes_added, - self.pd._bytes_avail, - _("Root filesystem")) - - - def evaluate(self): - """Given already determined fmri changes, - build pkg plans and figure out exact impact of - proposed changes""" - - assert self.pd.state == plandesc.EVALUATED_PKGS, self - - if self.pd._image_lm != \ - self.image.get_last_modified(string=True): - # State has been modified since plan was created; this - # plan is no longer valid. - raise api_errors.InvalidPlanError() - - self.__evaluate_pkg_plans() - self.__merge_actions() - self.__compile_release_notes() - - if not self.pd._li_pkg_updates and self.pd.pkg_plans: - # oops. the caller requested no package updates and - # we couldn't satisfy that request. - fmri_updates = [ - (p.origin_fmri, p.destination_fmri) - for p in self.pd.pkg_plans - ] - raise api_errors.PlanCreationException( - pkg_updates_required=fmri_updates) - - # Check for files which have been elided due to image - # exclusions, and honour the image's exclusion policy. - ix_policy = self.image.get_property( - imageconfig.EXCLUDE_POLICY) - if self.pd.elided_actions and ix_policy != 'ignore': - elided = [] - for (o, n) in self.pd.get_elided_actions(): - if o is None: - elided.extend(n.attrlist('path')) - if ix_policy == 'warn': - timestamp = misc.time_to_timestamp(time.time()) - self.pd.add_item_message( - "warning", timestamp, MSG_WARNING, - self.__make_excl_msg(elided)) - elif ix_policy == 'reject': - raise api_errors.PlanExclusionError(paths=elided) - - # These must be done after action merging. - self.__evaluate_pkg_preserved_files() - self.__evaluate_pkg_downloads() - # If there are invalid mediators then message about it - # for the no execute (-n) or zones case. If an update - # in the global zone an exception will be raised later. - if self.invalid_meds: - if self.__noexecute or self.image.is_zone(): - medmsg = self.__make_med_msg() - timestamp = misc.time_to_timestamp(time.time()) - self.pd.add_item_message("warning", timestamp, - MSG_WARNING, medmsg) - - def __update_avail_space(self): - """Update amount of available space on FS""" - - self.pd._cbytes_avail = misc.spaceavail( - self.image.write_cache_path) - - self.pd._bytes_avail = misc.spaceavail(self.image.root) - # if we don't have a full image yet - if self.pd._cbytes_avail < 0: - self.pd._cbytes_avail = self.pd._bytes_avail - - def __include_note(self, installed_dict, act, containing_fmri): - """Decide if a release note should be shown/included. If - feature/pkg/self is fmri, fmri is containing package; - if version is then 0, this is note is displayed on initial - install only. Otherwise, if version earlier than specified - fmri is present in code, display release note.""" - - for fmristr in act.attrlist("release-note"): - try: - pfmri = pkg.fmri.PkgFmri(fmristr) - except pkg.fmri.FmriError: - continue # skip malformed fmris - # any special handling here? 
- if pfmri.pkg_name == "feature/pkg/self": - if str(pfmri.version) == "0,5.11" \ - and containing_fmri.pkg_name \ - not in installed_dict: - return True - else: - pfmri.pkg_name = \ - containing_fmri.pkg_name - if pfmri.pkg_name not in installed_dict: - continue - installed_fmri = installed_dict[pfmri.pkg_name] - # if neither is successor they are equal - if pfmri.is_successor(installed_fmri): - return True - return False + mediator = ap.dst.attrs.get("mediator") + if not mediator: + # Link is not mediated. + continue - def __get_note_text(self, act, pfmri): - """Retrieve text for release note from repo + med_ver = ap.dst.attrs.get("mediator-version") + if med_ver: + med_ver = pkg.version.Version(med_ver) + med_impl = ap.dst.attrs.get("mediator-implementation") - If there are UTF-8 encoding errors in the text replace them - so that we still have a note to show rather than failing - the entire operation. The copy saved on disk is left as is.""" - try: - pub = self.image.get_publisher(pfmri.publisher) - hash_attr, hash_val, hash_func = \ - digest.get_least_preferred_hash(act) - return self.image.transport.get_content(pub, hash_val, - fmri=pfmri, hash_func=hash_func, errors="replace") - finally: - self.image.cleanup_downloads() - - def __compile_release_notes(self): - """Figure out what release notes need to be displayed""" - release_notes = self.pd._actuators.get_release_note_info() - must_display = False - notes = [] - - if release_notes: - installed_dict = ImagePlan.__fmris2dict( - self.image.gen_installed_pkgs()) - for act, pfmri in release_notes: - if self.__include_note(installed_dict, act, - pfmri): - if act.attrs.get("must-display", - "false") == "true": - must_display = True - for l in self.__get_note_text( - act, pfmri).splitlines(): - notes.append(misc.decode(l)) - - self.pd.release_notes = (must_display, notes) - - def __save_release_notes(self): - """Save a copy of the release notes and store the file name""" - if self.pd.release_notes[1]: - # create a file in imgdir/notes - dpath = os.path.join(self.image.imgdir, "notes") - misc.makedirs(dpath) - fd, path = tempfile.mkstemp(suffix=".txt", - dir=dpath, prefix="release-notes-") - tmpfile = os.fdopen(fd, "w") - for note in self.pd.release_notes[1]: - note = misc.force_str(note) - print(note, file=tmpfile) - # make file world readable - os.chmod(path, 0o644) - tmpfile.close() - self.pd.release_notes_name = os.path.basename(path) - - def __evaluate_pkg_plans(self): - """Internal helper function that does the work of converting - fmri changes into pkg plans.""" - - pt = self.__progtrack - # prefetch manifests - prefetch_mfsts = [] # manifest, intents to be prefetched - eval_list = [] # oldfmri, oldintent, newfmri, newintent - # prefetched intents omitted - enabled_publishers = set([ - a.prefix - for a in self.image.gen_publishers() - ]) - - # - # XXX this could be improved, or perhaps the "do we have it?" - # logic could be moved into prefetch_manifests, and - # PLAN_FIND_MFST could go away? This can be slow. 
- # - pt.plan_start(pt.PLAN_FIND_MFST) - for oldfmri, newfmri in self.pd._fmri_changes: - pt.plan_add_progress(pt.PLAN_FIND_MFST) - old_in, new_in = self.__create_intent(oldfmri, newfmri, - enabled_publishers) - if oldfmri: - if not self.image.has_manifest(oldfmri): - prefetch_mfsts.append((oldfmri, old_in)) - old_in = None # so we don't send it twice - if newfmri: - if not self.image.has_manifest(newfmri): - prefetch_mfsts.append((newfmri, new_in)) - new_in = None - eval_list.append((oldfmri, old_in, newfmri, new_in)) - old_in = new_in = None - pt.plan_done(pt.PLAN_FIND_MFST) - - # No longer needed. - del enabled_publishers - self.__match_rm = {} - self.__match_update = {} - - self.image.transport.prefetch_manifests(prefetch_mfsts, - ccancel=self.__check_cancel, progtrack=self.__progtrack) - - # No longer needed. - del prefetch_mfsts - - max_items = len(eval_list) - pt.plan_start(pt.PLAN_PKGPLAN, goal=max_items) - same_excludes = self.__old_excludes == self.__new_excludes - - for oldfmri, old_in, newfmri, new_in in eval_list: - pp = pkgplan.PkgPlan(self.image) - - if oldfmri == newfmri: - # When creating intent, we always prefer to send - # the new intent over old intent (see - # __create_intent), so it's not necessary to - # touch the old manifest in this situation. - m = self.__get_manifest(newfmri, new_in, - ignore_excludes=True) - pp.propose( - oldfmri, m, - newfmri, m) - can_exclude = same_excludes - else: - pp.propose( - oldfmri, - self.__get_manifest(oldfmri, old_in), - newfmri, - self.__get_manifest(newfmri, new_in, - ignore_excludes=True)) - can_exclude = True - - pp.evaluate(self.__old_excludes, self.__new_excludes, - can_exclude=can_exclude) - - self.pd.pkg_plans.append(pp) - pt.plan_add_progress(pt.PLAN_PKGPLAN, nitems=1) - pp = None - - # No longer needed. - del eval_list - pt.plan_done(pt.PLAN_PKGPLAN) + prop_med_ver = prop_mediators[mediator].get("version") + prop_med_impl = prop_mediators[mediator].get("implementation") - def __mediate_links(self, mediated_removed_paths): - """Mediate links in the plan--this requires first determining the - possible mediation for each mediator. This is done solely based - on the metadata of the links that are still or will be installed. - Returns a dictionary of the proposed mediations.""" - - # - # If we're not changing mediators, and we're not changing - # variants or facets (which could affect mediators), and we're - # not changing any packages (which could affect mediators), - # then mediators can't be changing so there's nothing to do - # here. - # - if not self.pd._mediators_change and \ - not self.pd._varcets_change and \ - not self.pd._fmri_changes: - # return the currently configured mediators - return defaultdict(set, self.pd._cfg_mediators) - - prop_mediators = defaultdict(set) - mediated_installed_paths = defaultdict(set) - for a, pfmri in itertools.chain( - self.gen_new_installed_actions_bytype("link"), - self.gen_new_installed_actions_bytype("hardlink")): - mediator = a.attrs.get("mediator") - if not mediator: - # Link is not mediated. - continue - med_ver = a.attrs.get("mediator-version") - if med_ver: - med_ver = pkg.version.Version(med_ver) - med_impl = a.attrs.get("mediator-implementation") - if not (med_ver or med_impl): - # Link mediation is incomplete. 
- continue - med_priority = a.attrs.get("mediator-priority") - prop_mediators[mediator].add((med_priority, med_ver, - med_impl)) - mediated_installed_paths[a.attrs["path"]].add((a, pfmri, - mediator, med_ver, med_impl)) - - # Now select only the "best" mediation for each mediator; - # items() is used here as the dictionary is altered during - # iteration. - cfg_mediators = self.pd._cfg_mediators - changed_mediators = set() - for mediator, values in prop_mediators.items(): - med_ver_source = med_impl_source = med_priority = \ - med_ver = med_impl = med_impl_ver = None - - mediation = self.pd._new_mediators.get(mediator) - cfg_mediation = cfg_mediators.get(mediator) - if mediation: - med_ver = mediation.get("version") - med_ver_source = mediation.get("version-source") - med_impl = mediation.get("implementation") - med_impl_source = mediation.get( - "implementation-source") - elif mediation is None and cfg_mediation: - # If a reset of mediation was not requested, - # use previously configured mediation as the - # default. - med_ver = cfg_mediation.get("version") - med_ver_source = cfg_mediation.get( - "version-source") - med_impl = cfg_mediation.get("implementation") - med_impl_source = cfg_mediation.get( - "implementation-source") - - # Pick first "optimal" version and/or implementation. - for opt_priority, opt_ver, opt_impl in sorted(values, - key=cmp_to_key(med.cmp_mediations)): - if med_ver_source == "local": - if opt_ver != med_ver: - # This mediation not allowed - # by local configuration. - continue - if med_impl_source == "local": - if not mediator_impl_matches(opt_impl, - med_impl): - # This mediation not allowed - # by local configuration. - continue - - med_source = opt_priority - if not med_source: - # 'source' is equivalent to priority, - # but if no priority was specified, - # treat this as 'system' to indicate - # the mediation component was arbitrarily - # selected. - med_source = "system" - - if med_ver_source != "local": - med_ver = opt_ver - med_ver_source = med_source - if med_impl_source != "local": - med_impl = opt_impl - med_impl_source = med_source - elif med_impl and "@" not in med_impl: - # In the event a versionless - # implementation is set by the - # administrator, the version component - # has to be stored separately for display - # purposes. - impl_ver = \ - med.parse_mediator_implementation( - opt_impl)[1] - if impl_ver: - med_impl_ver = impl_ver - break - - if cfg_mediation and \ - (med_ver != cfg_mediation.get("version") or - not mediator_impl_matches(med_impl, - cfg_mediation.get("implementation"))): - # If mediation has changed for a mediator, then - # all links for already installed packages will - # have to be removed if they are for the old - # mediation or repaired (installed) if they are - # for the new mediation. - changed_mediators.add(mediator) - - prop_mediators[mediator] = {} - if med_ver: - prop_mediators[mediator]["version"] = med_ver - if med_ver_source: - prop_mediators[mediator]["version-source"] = \ - med_ver_source - if med_impl: - prop_mediators[mediator]["implementation"] = \ - med_impl - if med_impl_ver: - prop_mediators[mediator]["implementation-version"] = \ - med_impl_ver - if med_impl_source: - prop_mediators[mediator]["implementation-source"] = \ - med_impl_source - - # Determine which install and update actions should not be - # executed based on configured and proposed mediations. Also - # transform any install or update actions belonging to a - # changing mediation into removals. 
- - # This keeps track of which pkgplans need to be trimmed. - act_removals = {} - - # This keeps track of which mediated paths are being delivered - # and which need removal. - act_mediated_paths = { "installed": {}, "removed": {} } - - for al, ptype in ((self.pd.install_actions, "added"), - (self.pd.update_actions, "changed")): - for i, ap in enumerate(al): - if not ap or not (ap.dst.name == "link" or - ap.dst.name == "hardlink"): - continue - - mediator = ap.dst.attrs.get("mediator") - if not mediator: - # Link is not mediated. - continue - - med_ver = ap.dst.attrs.get("mediator-version") - if med_ver: - med_ver = pkg.version.Version(med_ver) - med_impl = ap.dst.attrs.get( - "mediator-implementation") - - prop_med_ver = prop_mediators[mediator].get( - "version") - prop_med_impl = prop_mediators[mediator].get( - "implementation") - - if med_ver == prop_med_ver and \ - mediator_impl_matches(med_impl, - prop_med_impl): - # Action should be delivered. - act_mediated_paths["installed"][ap.dst.attrs["path"]] = \ - None - mediated_installed_paths.pop( - ap.dst.attrs["path"], None) - continue - - # Ensure action is not delivered. - al[i] = None - - act_removals.setdefault(id(ap.p), - { "plan": ap.p, "added": [], "changed": [] }) - act_removals[id(ap.p)][ptype].append(id(ap.dst)) - - cfg_med_ver = cfg_mediators.get(mediator, - misc.EmptyDict).get("version") - cfg_med_impl = cfg_mediators.get(mediator, - misc.EmptyDict).get("implementation") - - if (mediator in cfg_mediators and - mediator in prop_mediators and - (med_ver == cfg_med_ver and - mediator_impl_matches(med_impl, - cfg_med_impl))): - # Install / update actions should only be - # transformed into removals if they match - # the previous configuration and are not - # allowed by the proposed mediation. - act_mediated_paths["removed"].setdefault( - ap.dst.attrs["path"], []).append(ap) - - # As an optimization, only remove rejected, mediated paths if - # another link is not being delivered to the same path. - for ap in ( - ap - for path in act_mediated_paths["removed"] - for ap in act_mediated_paths["removed"][path] - if path not in act_mediated_paths["installed"] + if med_ver == prop_med_ver and mediator_impl_matches( + med_impl, prop_med_impl ): - ap.p.actions.removed.append((ap.dst, - None)) - self.pd.removal_actions.append(_ActionPlan( - ap.p, ap.dst, None)) - act_mediated_paths = None - - for a, pfmri, mediator, med_ver, med_impl in ( - med_link - for entry in mediated_installed_paths.values() - for med_link in entry): - if mediator not in changed_mediators: - # Action doesn't need repairing. - continue + # Action should be delivered. + act_mediated_paths["installed"][ap.dst.attrs["path"]] = None + mediated_installed_paths.pop(ap.dst.attrs["path"], None) + continue - new_med_ver = prop_mediators[mediator].get("version") - new_med_impl = prop_mediators[mediator].get( - "implementation") + # Ensure action is not delivered. + al[i] = None - if med_ver == new_med_ver and \ - mediator_impl_matches(med_impl, new_med_impl): - # Action needs to be repaired (installed) since - # mediation now applies. - self.__propose_fixup(a, None, pfmri) - continue + act_removals.setdefault( + id(ap.p), {"plan": ap.p, "added": [], "changed": []} + ) + act_removals[id(ap.p)][ptype].append(id(ap.dst)) - if mediator not in cfg_mediators: - # Nothing to do. 
- continue + cfg_med_ver = cfg_mediators.get(mediator, misc.EmptyDict).get( + "version" + ) + cfg_med_impl = cfg_mediators.get(mediator, misc.EmptyDict).get( + "implementation" + ) - cfg_med_ver = cfg_mediators[mediator].get("version") - cfg_med_impl = cfg_mediators[mediator].get( - "implementation") - if a.attrs["path"] not in mediated_removed_paths and \ - med_ver == cfg_med_ver and \ - mediator_impl_matches(med_impl, cfg_med_impl): - # Action needs to be removed since mediation no - # longer applies and is not already set for - # removal. - self.__propose_fixup(None, a, pfmri) - - # Now trim pkgplans and elide empty entries from list of actions - # to execute. - for entry in act_removals.values(): - p = entry["plan"] - for prop in ("added", "changed"): - trim = entry[prop] - # Can't modify the p.actions tuple directly, so - # modify the members in place. - pval = getattr(p.actions, prop) - pval[:] = [ - a - for a in pval - if id(a[1]) not in trim - ] - - return prop_mediators - - def __full_target_path(self, linkpath): - """ Resolves a link target to a relative pathname - of the image being modified. """ - - rootpath = os.path.join(self.image.root, linkpath) - reallinkpath = os.path.normpath(rootpath) - - targetname = os.path.realpath(reallinkpath) - # Should only trigger if there is something wrong with the - # associated package manifest. - assert(targetname.startswith(self.image.root)) - # Chop the image.root off as a relative path to match - # the package manifests is required. - res = targetname[len(self.image.root):] - # Zones will have an extra '/' at the start of the pathname, - # so remove it. - if res.startswith("/"): - return res[1:] - return res - - def __make_med_msg(self): - """ Helper function to create the message string for poorly - configured mediators. """ - fmt_str = " {0:24} {1}\n" - res = _("The following mediated link targets do not exist, " - "please reset the links via pkg set-mediator:\n") - res = res + fmt_str.format(_("MEDIATOR"), _("REMOVED PATH(S)")) - for med in self.invalid_meds: - res = res + fmt_str.format(med, - ", ".join( - self.invalid_meds[med])) - return res - - def __make_excl_msg(self, elided: list[str]) -> str: - """ Helper function to create the message string for excluded - actions. """ - - return _("""\ + if ( + mediator in cfg_mediators + and mediator in prop_mediators + and ( + med_ver == cfg_med_ver + and mediator_impl_matches(med_impl, cfg_med_impl) + ) + ): + # Install / update actions should only be + # transformed into removals if they match + # the previous configuration and are not + # allowed by the proposed mediation. + act_mediated_paths["removed"].setdefault( + ap.dst.attrs["path"], [] + ).append(ap) + + # As an optimization, only remove rejected, mediated paths if + # another link is not being delivered to the same path. + for ap in ( + ap + for path in act_mediated_paths["removed"] + for ap in act_mediated_paths["removed"][path] + if path not in act_mediated_paths["installed"] + ): + ap.p.actions.removed.append((ap.dst, None)) + self.pd.removal_actions.append(_ActionPlan(ap.p, ap.dst, None)) + act_mediated_paths = None + + for a, pfmri, mediator, med_ver, med_impl in ( + med_link + for entry in mediated_installed_paths.values() + for med_link in entry + ): + if mediator not in changed_mediators: + # Action doesn't need repairing. 
+ continue + + new_med_ver = prop_mediators[mediator].get("version") + new_med_impl = prop_mediators[mediator].get("implementation") + + if med_ver == new_med_ver and mediator_impl_matches( + med_impl, new_med_impl + ): + # Action needs to be repaired (installed) since + # mediation now applies. + self.__propose_fixup(a, None, pfmri) + continue + + if mediator not in cfg_mediators: + # Nothing to do. + continue + + cfg_med_ver = cfg_mediators[mediator].get("version") + cfg_med_impl = cfg_mediators[mediator].get("implementation") + if ( + a.attrs["path"] not in mediated_removed_paths + and med_ver == cfg_med_ver + and mediator_impl_matches(med_impl, cfg_med_impl) + ): + # Action needs to be removed since mediation no + # longer applies and is not already set for + # removal. + self.__propose_fixup(None, a, pfmri) + + # Now trim pkgplans and elide empty entries from list of actions + # to execute. + for entry in act_removals.values(): + p = entry["plan"] + for prop in ("added", "changed"): + trim = entry[prop] + # Can't modify the p.actions tuple directly, so + # modify the members in place. + pval = getattr(p.actions, prop) + pval[:] = [a for a in pval if id(a[1]) not in trim] + + return prop_mediators + + def __full_target_path(self, linkpath): + """Resolves a link target to a relative pathname + of the image being modified.""" + + rootpath = os.path.join(self.image.root, linkpath) + reallinkpath = os.path.normpath(rootpath) + + targetname = os.path.realpath(reallinkpath) + # Should only trigger if there is something wrong with the + # associated package manifest. + assert targetname.startswith(self.image.root) + # Chop the image.root off as a relative path to match + # the package manifests is required. + res = targetname[len(self.image.root) :] + # Zones will have an extra '/' at the start of the pathname, + # so remove it. 
+ if res.startswith("/"): + return res[1:] + return res + + def __make_med_msg(self): + """Helper function to create the message string for poorly + configured mediators.""" + fmt_str = " {0:24} {1}\n" + res = _( + "The following mediated link targets do not exist, " + "please reset the links via pkg set-mediator:\n" + ) + res = res + fmt_str.format(_("MEDIATOR"), _("REMOVED PATH(S)")) + for med in self.invalid_meds: + res = res + fmt_str.format(med, ", ".join(self.invalid_meds[med])) + return res + + def __make_excl_msg(self, elided: list[str]) -> str: + """Helper function to create the message string for excluded + actions.""" + + return ( + _( + """\ ***************************************************************************** WARNING: The following actions have not been installed as this is a partial -image (there are configured exclusions):""") + \ - "\n\n " + "\n ".join(elided) + """ +image (there are configured exclusions):""" + ) + + "\n\n " + + "\n ".join(elided) + + """ ***************************************************************************** """ + ) + + def __is_target_removed(self, filepath): + """Check to see if the named filepath is being removed in + the plan.""" + removed = self.pd.find_removal(filepath) + return removed + + def __finalize_mediation(self, prop_mediators, mediated_del_path_target): + """Merge requested and previously configured mediators that are + being set but don't affect the plan and update proposed image + configuration.""" + + cfg_mediators = self.pd._cfg_mediators + for m in self.pd._new_mediators: + prop_mediators.setdefault(m, self.pd._new_mediators[m]) + for m in cfg_mediators: + mediation = cfg_mediators[m] + # Check to see if the proposed mediator has removed + # targets, if so then upon an update there will be + # invalid mediated links so add the target to the + # invalid list, otherwise it is okay so nothing to + # do. + if m in prop_mediators: + if m in mediated_del_path_target: + for target in mediated_del_path_target[m]: + if self.__is_target_removed(target): + self.invalid_meds[m].add(target) + continue + + new_mediation = mediation.copy() + if mediation.get("version-source") != "local": + new_mediation.pop("version", None) + del new_mediation["version-source"] + if mediation.get("implementation-source") != "local": + new_mediation.pop("implementation", None) + new_mediation.pop("implementation-version", None) + del new_mediation["implementation-source"] + + if new_mediation: + # Only preserve the portion of configured + # mediations provided by the image administrator. + prop_mediators[m] = new_mediation + + for m, new_mediation in six.iteritems(prop_mediators): + # If after processing all mediation data, a source wasn't + # marked for a particular component, mark it as being + # sourced from 'system'. + if ( + "implementation-source" in new_mediation + and "version-source" not in new_mediation + ): + new_mediation["version-source"] = "system" + elif ( + "version-source" in new_mediation + and "implementation-source" not in new_mediation + ): + new_mediation["implementation-source"] = "system" + + # The proposed mediators become the new mediators (this accounts + # for mediation selection done as part of a packaging operation + # instead of being explicitly requested). + + # Initially assume mediation is changing. + self.pd._mediators_change = True + + for m in list(prop_mediators.keys()): + if m not in cfg_mediators: + if prop_mediators[m]: + # Fully-defined mediation not in previous + # configuration; mediation has changed. 
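Before a previously configured mediation is merged into the proposed set, it is reduced to just the components the administrator provided, i.e. those whose source is "local"; anything sourced elsewhere is recomputed during the operation. A small worked example of that reduction, using hypothetical mediation data:

    def admin_portion(mediation):
        # Keep only components whose source is "local"; drop the rest so
        # the packaging operation can re-derive them.
        kept = dict(mediation)
        if kept.get("version-source") != "local":
            kept.pop("version", None)
            kept.pop("version-source", None)
        if kept.get("implementation-source") != "local":
            kept.pop("implementation", None)
            kept.pop("implementation-version", None)
            kept.pop("implementation-source", None)
        return kept

    cfg = {
        "version": "3.11", "version-source": "local",
        "implementation": "cpython", "implementation-source": "system",
    }
    print(admin_portion(cfg))
    # {'version': '3.11', 'version-source': 'local'}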
+ break + + # No change in mediation; elide it. + del prop_mediators[m] + continue + + mediation = cfg_mediators[m] + if any( + k + for k in set( + list(prop_mediators[m].keys()) + list(mediation.keys()) + ) + if prop_mediators[m].get(k) != mediation.get(k) + ): + # Mediation has changed. + break + else: + for m in cfg_mediators: + if m not in prop_mediators: + # Mediation has been removed from + # configuration. + break + else: + self.pd._mediators_change = False + + self.pd._new_mediators = prop_mediators + # Link mediation is complete. + + def __check_reserved(self, action): + """Check whether files are delivered to var/pkg or + .org.opensolaris.pkg""" + + if not "path" in action.attrs: + return True + + dirs = [ + "cache", + "gui_cache", + "history", + "license", + "linked", + "lost+found", + "publisher", + "ssl", + "state", + ] + + # Also check whether files are delivered to other + # reserved directories besides var/pkg + if portable.osname == "sunos": + reserved_dirs = ["var/tmp", "var/share", "tmp", "system/volatile"] + else: + reserved_dirs = [] + + files = ["pkg5.image", "lock"] + path = action.get_installed_path(self.image.root) + dir_path = path + "/" + + for d in dirs: + dir_p = os.path.join(self.image.imgdir, d) + "/" + if dir_path.startswith(dir_p): + return False + for d in reserved_dirs: + dir_p = os.path.join(self.image.root, d) + "/" + # can package these directories but not deliver anything to them + if dir_path.startswith(dir_p) and dir_path != dir_p: + return False - def __is_target_removed(self, filepath): - """ Check to see if the named filepath is being removed in - the plan.""" - removed = self.pd.find_removal(filepath) - return removed - - def __finalize_mediation(self, prop_mediators, mediated_del_path_target): - """Merge requested and previously configured mediators that are - being set but don't affect the plan and update proposed image - configuration.""" - - cfg_mediators = self.pd._cfg_mediators - for m in self.pd._new_mediators: - prop_mediators.setdefault(m, self.pd._new_mediators[m]) - for m in cfg_mediators: - - mediation = cfg_mediators[m] - # Check to see if the proposed mediator has removed - # targets, if so then upon an update there will be - # invalid mediated links so add the target to the - # invalid list, otherwise it is okay so nothing to - # do. - if m in prop_mediators: - if m in mediated_del_path_target: - for target in mediated_del_path_target[m]: - if self.__is_target_removed(target): - self.invalid_meds[m].add(target) - continue - - new_mediation = mediation.copy() - if mediation.get("version-source") != "local": - new_mediation.pop("version", None) - del new_mediation["version-source"] - if mediation.get("implementation-source") != "local": - new_mediation.pop("implementation", None) - new_mediation.pop("implementation-version", None) - del new_mediation["implementation-source"] - - if new_mediation: - # Only preserve the portion of configured - # mediations provided by the image administrator. - prop_mediators[m] = new_mediation - - for m, new_mediation in six.iteritems(prop_mediators): - # If after processing all mediation data, a source wasn't - # marked for a particular component, mark it as being - # sourced from 'system'. 
- if "implementation-source" in new_mediation and \ - "version-source" not in new_mediation: - new_mediation["version-source"] = "system" - elif "version-source" in new_mediation and \ - "implementation-source" not in new_mediation: - new_mediation["implementation-source"] = "system" - - # The proposed mediators become the new mediators (this accounts - # for mediation selection done as part of a packaging operation - # instead of being explicitly requested). - - # Initially assume mediation is changing. - self.pd._mediators_change = True - - for m in list(prop_mediators.keys()): - if m not in cfg_mediators: - if prop_mediators[m]: - # Fully-defined mediation not in previous - # configuration; mediation has changed. - break - - # No change in mediation; elide it. - del prop_mediators[m] - continue - - mediation = cfg_mediators[m] - if any( - k - for k in set(list(prop_mediators[m].keys()) + - list(mediation.keys())) - if prop_mediators[m].get(k) != mediation.get(k)): - # Mediation has changed. - break - else: - for m in cfg_mediators: - if m not in prop_mediators: - # Mediation has been removed from - # configuration. - break - else: - self.pd._mediators_change = False - - self.pd._new_mediators = prop_mediators - # Link mediation is complete. - - def __check_reserved(self, action): - """Check whether files are delivered to var/pkg or - .org.opensolaris.pkg""" - - if not "path" in action.attrs: - return True - - dirs = ["cache", "gui_cache", "history", "license", - "linked", "lost+found", "publisher", "ssl", "state" - ] + for f in files: + fname = os.path.join(self.image.imgdir, f) + if path == fname: + return False + return True - # Also check whether files are delivered to other - # reserved directories besides var/pkg - if portable.osname == "sunos": - reserved_dirs = ["var/tmp", "var/share", "tmp", "system/volatile"] + def __check_excluded(self, path): + if self.__exclude_re is None: + img_exclude = self.image.get_property(imageconfig.EXCLUDE_PATTERNS) + if len(img_exclude): + exclude_regex = "^(?:" + ("|".join(img_exclude)) + ")" + if DebugValues["exclude"]: + print("Exclude Regex:", exclude_regex) + self.__exclude_re = relib.compile(exclude_regex) + else: + self.__exclude_re = "" + + if self.__exclude_re == "": + return False + if self.image.root != "/" and path.startswith(self.image.root[1:]): + path = path[len(self.image.root) :] + if DebugValues["exclude"]: + print("Checking exclude:", path) + return self.__exclude_re.search(path) + + def __merge_actions(self): + """Given a set of fmri changes and their associated pkg plan, + merge all the resultant actions for the packages being + updated.""" + + pt = self.__progtrack + if self.pd._new_mediators is None: + self.pd._new_mediators = {} + + if self.image.has_boot_archive(): + ramdisk_prefixes = tuple(self.image.get_ramdisk_filelist()) + if not ramdisk_prefixes: + self.pd._need_boot_archive = False + else: + self.pd._need_boot_archive = False + + # now combine all actions together to create a synthetic + # single step upgrade operation, and handle editable + # files moving from package to package. See theory + # comment in execute, below. 
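__check_excluded above amounts to compiling the image's exclude patterns into one anchored alternation, caching the compiled expression on first use, and testing each candidate path against it. A minimal sketch of that behaviour, with a hypothetical pattern list in place of the imageconfig.EXCLUDE_PATTERNS property:

    import re

    patterns = ["usr/share/doc/.*", "usr/share/man/.*"]  # hypothetical
    exclude_re = re.compile("^(?:" + "|".join(patterns) + ")") if patterns else None

    def is_excluded(path):
        # Manifest paths have no leading slash, so the expression is
        # anchored at the start of the image-relative path.
        return bool(exclude_re and exclude_re.search(path))

    print(is_excluded("usr/share/man/man1/ls.1"))  # True
    print(is_excluded("usr/bin/ls"))               # False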
+ + for pp in self.pd.pkg_plans: + if pp.origin_fmri and pp.destination_fmri: + self.__target_update_count += 1 + elif pp.destination_fmri: + self.__target_install_count += 1 + elif pp.origin_fmri: + self.__target_removal_count += 1 + + # we now have a workable set of pkgplans to add/upgrade/remove + # now combine all actions together to create a synthetic single + # step upgrade operation, and handle editable files moving from + # package to package. See theory comment in execute, below. + self.pd.removal_actions = [] + + pt.plan_start(pt.PLAN_ACTION_MERGE) + # cache the current image mediators within the plan + cfg_mediators = self.pd._cfg_mediators = self.image.cfg.mediators + + mediated_removed_paths = set() + mediated_del_path_target = defaultdict(set) + for p in self.pd.pkg_plans: + pt.plan_add_progress(pt.PLAN_ACTION_MERGE) + for src, dest in p.gen_removal_actions(): + if DebugValues["actions"]: + print("Removal: " + str(src)) + if src.name == "user": + self.pd.removed_users[src.attrs["username"]] = p.origin_fmri + elif src.name == "group": + self.pd.removed_groups[ + src.attrs["groupname"] + ] = p.origin_fmri + + self.pd.removal_actions.append(_ActionPlan(p, src, dest)) + if ( + not (src.name == "link" or src.name == "hardlink") + or "mediator" not in src.attrs + ): + continue + + # Keep track of which mediated paths have been + # removed from the system so that which paths + # need to be repaired can be determined. + mediator = src.attrs["mediator"] + src_version = src.attrs.get("mediator-version") + src_impl = src.attrs.get("mediator-implementation") + + mediation = cfg_mediators.get(mediator) + if not mediation: + # Shouldn't happen, but if it does, + # drive on. + continue + + cfg_version = mediation.get("version") + if cfg_version: + # For comparison, version must be a + # string. + cfg_version = cfg_version.get_short_version() + + cfg_version_source = mediation.get("version-source") + cfg_impl = mediation.get("implementation") + cfg_impl_source = mediation.get("implementation-source") + + if src_version == cfg_version and mediator_impl_matches( + src_impl, cfg_impl + ): + mediated_removed_paths.add(src.attrs["path"]) + # Need the target to be a full path + # so it can be found in the plan. 
+ if ( + cfg_version_source == "local" + or cfg_impl_source == "local" + ): + binpath = src.attrs["path"] + target = self.__full_target_path(binpath) + mediated_del_path_target[mediator].add(target) + + self.pd.update_actions = [] + self.pd._rm_aliases = {} + for p in self.pd.pkg_plans: + pt.plan_add_progress(pt.PLAN_ACTION_MERGE) + for src, dest in p.gen_update_actions(): + if DebugValues["actions"]: + print("Update:" + str(src)) + print(" " + str(dest)) + if dest.name == "user": + self.pd.added_users[ + dest.attrs["username"] + ] = p.destination_fmri + elif dest.name == "group": + self.pd.added_groups[ + dest.attrs["groupname"] + ] = p.destination_fmri + elif dest.name == "driver" and src: + rm = set(src.attrlist("alias")) - set( + dest.attrlist("alias") + ) + if rm: + self.pd._rm_aliases.setdefault( + dest.attrs["name"], set() + ).update(rm) + self.pd.update_actions.append(_ActionPlan(p, src, dest)) + + self.pd.install_actions = [] + errs = [] + for p in self.pd.pkg_plans: + pfmri = None + if p.destination_fmri: + pfmri = p.destination_fmri.get_fmri() + err_actions = api_errors.ImageBoundaryError(pfmri) + pt.plan_add_progress(pt.PLAN_ACTION_MERGE) + for src, dest in p.gen_install_actions(): + if DebugValues["actions"]: + print("Install: " + str(dest)) + if dest.name == "user": + self.pd.added_users[ + dest.attrs["username"] + ] = p.destination_fmri + elif dest.name == "group": + self.pd.added_groups[ + dest.attrs["groupname"] + ] = p.destination_fmri + # Check whether files are delivered in reserved + # locations. + if not self.__check_reserved(dest): + err_actions.append_error( + action=dest, + err_type=api_errors.ImageBoundaryError.RESERVED, + ) + self.pd.install_actions.append(_ActionPlan(p, src, dest)) + if not err_actions.isEmpty(): + errs.append(err_actions) + + if errs: + raise api_errors.ImageBoundaryErrors(errs) + + # In case a removed user or group was added back... + for entry in self.pd.added_groups.keys(): + if entry in self.pd.removed_groups: + del self.pd.removed_groups[entry] + for entry in self.pd.added_users.keys(): + if entry in self.pd.removed_users: + del self.pd.removed_users[entry] + + self.pd.state = plandesc.MERGED_OK + pt.plan_done(pt.PLAN_ACTION_MERGE) + + if not self.nothingtodo(): + self.__find_all_conflicts() + + pt.plan_start(pt.PLAN_ACTION_CONSOLIDATE) + ConsolidationEntry = namedtuple("ConsolidationEntry", "idx id") + + # cons_named maps original_name tags to the index into + # removal_actions so we can retrieve them later. cons_generic + # maps the (action.name, action.key-attribute-value) tuple to + # the same thing. The reason for both is that cons_named allows + # us to deal with files which change their path as well as their + # package, while cons_generic doesn't require the "receiving" + # package to have marked the file in any special way, plus + # obviously it handles all actions even if they don't have + # paths. + cons_named = {} + cons_generic = {} + + def hashify(v): + """handle key values that may be lists""" + if isinstance(v, list): + return frozenset(v) + else: + return v + + for i, ap in enumerate(self.pd.removal_actions): + if ap is None: + continue + pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) + + # If the action type needs to be reference-counted, make + # sure it doesn't get removed if another instance + # remains in the target image. 
+ remove = True + if ( + ap.src.name == "dir" + and os.path.normpath(ap.src.attrs["path"]) + in self.__get_directories() + ): + remove = False + elif ap.src.name == "link" or ap.src.name == "hardlink": + lpath = os.path.normpath(ap.src.attrs["path"]) + if ap.src.name == "link": + inst_links = self.__get_symlinks() else: - reserved_dirs = [] - - files = ["pkg5.image", "lock"] - path = action.get_installed_path(self.image.root) - dir_path = path + "/" - - for d in dirs: - dir_p = os.path.join(self.image.imgdir, d) + "/" - if dir_path.startswith(dir_p): - return False - - for d in reserved_dirs: - dir_p = os.path.join(self.image.root, d) + "/" - # can package these directories but not deliver anything to them - if dir_path.startswith(dir_p) and dir_path != dir_p: - return False - - for f in files: - fname = os.path.join(self.image.imgdir, f) - if path == fname: - return False - return True - - def __check_excluded(self, path): - if self.__exclude_re is None: - img_exclude = self.image.get_property( - imageconfig.EXCLUDE_PATTERNS) - if len(img_exclude): - exclude_regex = "^(?:" + ( - "|".join(img_exclude)) + ")" - if DebugValues["exclude"]: - print("Exclude Regex:", exclude_regex) - self.__exclude_re = relib.compile(exclude_regex) - else: - self.__exclude_re = '' - - if self.__exclude_re == '': return False - if (self.image.root != '/' and - path.startswith(self.image.root[1:])): - path = path[len(self.image.root):] + inst_links = self.__get_hardlinks() + if self.__check_excluded(lpath): + if DebugValues["exclude"]: + print("!Removal:", lpath) + self.pd.elided_actions.append(ap) + remove = False + elif lpath in inst_links: + # Another link delivers to the same + # location, so assume it can't be + # safely removed initially. + remove = False + + # If link is mediated, and the mediator + # doesn't match the new mediation + # criteria, it is safe to remove. + mediator = ap.src.attrs.get("mediator") + if mediator in self.pd._new_mediators: + src_version = ap.src.attrs.get("mediator-version") + src_impl = ap.src.attrs.get("mediator-implementation") + dest_version = self.pd._new_mediators[mediator].get( + "version" + ) + if dest_version: + # Requested version needs + # to be a string for + # comparison. + dest_version = dest_version.get_short_version() + dest_impl = self.pd._new_mediators[mediator].get( + "implementation" + ) + if ( + dest_version is not None + and src_version != dest_version + ): + remove = True + if dest_impl is not None and not mediator_impl_matches( + src_impl, dest_impl + ): + remove = True + + elif ( + ap.src.name == "license" + and ap.src.attrs["license"] in self.__get_licenses() + ): + remove = False + elif ( + ap.src.name == "legacy" + and ap.src.attrs["pkg"] in self.__get_legacy() + ): + remove = False + elif "path" in ap.src.attrs and self.__check_excluded( + ap.src.attrs["path"] + ): + if DebugValues["exclude"]: + print("!Remove", ap.src.attrs["path"]) + self.pd.elided_actions.append(ap) + remove = False + + if not remove: + self.pd.removal_actions[i] = None + if "mediator" in ap.src.attrs: + mediated_removed_paths.discard(ap.src.attrs["path"]) + continue + + # store names of files being removed under own name + # or original name if specified + if ap.src.globally_identical: + attrs = ap.src.attrs + # Store the index into removal_actions and the + # id of the action object in that slot. 
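The remove/keep decisions above are reference counting in disguise: a directory, link, license or legacy action is only removed when nothing left in the target image still delivers it. A stripped-down sketch of the test, with plain sets standing in for __get_directories() and __get_symlinks():

    import os

    delivered_dirs = {"usr/bin", "usr/lib"}   # stand-in for __get_directories()
    delivered_links = {"usr/bin/python"}      # stand-in for __get_symlinks()

    def safe_to_remove(action_name, path):
        # Keep the path in place if another package in the target image
        # still delivers the same directory or link.
        p = os.path.normpath(path)
        if action_name == "dir":
            return p not in delivered_dirs
        if action_name in ("link", "hardlink"):
            return p not in delivered_links
        return True

    print(safe_to_remove("dir", "usr/bin"))        # False - still delivered
    print(safe_to_remove("link", "usr/bin/perl"))  # True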
+ re = ConsolidationEntry(i, id(ap.src)) + cons_generic[ + (ap.src.name, hashify(attrs[ap.src.key_attr])) + ] = re + if ap.src.name == "file": + fname = attrs.get( + "original_name", + "{0}:{1}".format( + ap.p.origin_fmri.get_name(), attrs["path"] + ), + ) + cons_named[fname] = re + fname = None + attrs = re = None + + self.pd._actuators.scan_removal(ap) + if self.pd._need_boot_archive is None: + if self.pd._op != PKG_OP_DEHYDRATE and ap.src.attrs.get( + "path", "" + ).startswith(ramdisk_prefixes): + self.pd._need_boot_archive = True + + # reduce memory consumption + self.__directories = None + self.__symlinks = None + self.__hardlinks = None + self.__licenses = None + self.__legacy = None + + pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) + + # Construct a mapping from the install actions in a pkgplan to + # the position they have in the plan's list. This allows us to + # remove them efficiently later, if they've been consolidated. + # + # NOTE: This means that the action ordering in the package plans + # must remain fixed, at least for the duration of the imageplan + # evaluation. + plan_pos = {} + for p in self.pd.pkg_plans: + for i, a in enumerate(p.gen_install_actions()): + plan_pos[id(a[1])] = i + + # This keeps track of which pkgplans have had install actions + # consolidated away. + pp_needs_trimming = set() + + # This maps destination actions to the pkgplans they're + # associated with, which allows us to create the newly + # discovered update _ActionPlans. + dest_pkgplans = {} + + new_updates = [] + for i, ap in enumerate(self.pd.install_actions): + if ap is None: + continue + pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) + + if "path" in ap.dst.attrs and self.__check_excluded( + ap.dst.attrs["path"] + ): if DebugValues["exclude"]: - print("Checking exclude:", path) - return self.__exclude_re.search(path) - - def __merge_actions(self): - """Given a set of fmri changes and their associated pkg plan, - merge all the resultant actions for the packages being - updated.""" - - pt = self.__progtrack - if self.pd._new_mediators is None: - self.pd._new_mediators = {} - - if self.image.has_boot_archive(): - ramdisk_prefixes = tuple( - self.image.get_ramdisk_filelist()) - if not ramdisk_prefixes: - self.pd._need_boot_archive = False + print("!Install", ap.dst.attrs["path"]) + self.pd.elided_actions.append(ap) + pp_needs_trimming.add(ap.p) + ap.p.actions.added[plan_pos[id(ap.dst)]] = None + self.pd.install_actions[i] = None + + # In order to handle editable files that move their path + # or change pkgs, for all new files with original_name + # attribute, make sure file isn't being removed by + # checking removal list. If it is, tag removal to save + # file, and install to recover cached version... caching + # is needed if directories are removed or don't exist + # yet. + if ( + ap.dst.name == "file" + and "original_name" in ap.dst.attrs + and ap.dst.attrs["original_name"] in cons_named + ): + cache_name = ap.dst.attrs["original_name"] + index = cons_named[cache_name].idx + ra = self.pd.removal_actions[index].src + assert id(ra) == cons_named[cache_name].id + # If the paths match, don't remove and add; + # convert to update. + if ap.dst.attrs["path"] == ra.attrs["path"]: + new_updates.append((ra, ap.dst)) + # If we delete items here, the indices + # in cons_named will be bogus, so mark + # them for later deletion. 
+ self.pd.removal_actions[index] = None + self.pd.install_actions[i] = None + # No need to handle it in cons_generic + # anymore + del cons_generic[("file", ra.attrs["path"])] + dest_pkgplans[id(ap.dst)] = ap.p else: - self.pd._need_boot_archive = False - - # now combine all actions together to create a synthetic - # single step upgrade operation, and handle editable - # files moving from package to package. See theory - # comment in execute, below. - - for pp in self.pd.pkg_plans: - if pp.origin_fmri and pp.destination_fmri: - self.__target_update_count += 1 - elif pp.destination_fmri: - self.__target_install_count += 1 - elif pp.origin_fmri: - self.__target_removal_count += 1 - - # we now have a workable set of pkgplans to add/upgrade/remove - # now combine all actions together to create a synthetic single - # step upgrade operation, and handle editable files moving from - # package to package. See theory comment in execute, below. - self.pd.removal_actions = [] + # The 'true' indicates the file should + # be removed from source. The removal + # action is changed using setdefault so + # that any overlay rules applied during + # conflict checking remain intact. + ra.attrs.setdefault("save_file", [cache_name, "true"]) + ap.dst.attrs["save_file"] = [cache_name, "true"] + + cache_name = index = ra = None + + # Similarly, try to prevent files (and other actions) + # from unnecessarily being deleted and re-created if + # they're simply moving between packages, but only if + # they keep their paths (or key-attribute values). + keyval = hashify(ap.dst.attrs.get(ap.dst.key_attr, None)) + if (ap.dst.name, keyval) in cons_generic: + nkv = ap.dst.name, keyval + index = cons_generic[nkv].idx + ra = self.pd.removal_actions[index].src + assert id(ra) == cons_generic[nkv].id + if keyval == ra.attrs[ra.key_attr]: + new_updates.append((ra, ap.dst)) + self.pd.removal_actions[index] = None + self.pd.install_actions[i] = None + dest_pkgplans[id(ap.dst)] = ap.p + # Add the action to the pkgplan's update + # list and mark it for removal from the + # install list. + ap.p.actions.changed.append((ra, ap.dst)) + ap.p.actions.added[plan_pos[id(ap.dst)]] = None + pp_needs_trimming.add(ap.p) + nkv = index = ra = None + + self.pd._actuators.scan_install(ap) + if self.pd._need_boot_archive is None: + if ap.dst.attrs.get("path", "").startswith(ramdisk_prefixes): + self.pd._need_boot_archive = True + + del ConsolidationEntry, cons_generic, cons_named, plan_pos + + # Remove from the pkgplans the install actions which have been + # consolidated away. + for p in pp_needs_trimming: + # Can't modify the p.actions tuple, so modify the added + # member in-place. + p.actions.added[:] = [a for a in p.actions.added if a is not None] + del pp_needs_trimming + + pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) + + # We want to cull out actions where they've not changed at all, + # leaving only the changed ones to put into + # self.pd.update_actions. 
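The consolidation above pairs a removal and an install of the same path into a single update, so an editable file that merely moves between packages is never deleted and recreated. A simplified sketch of the pairing idea, using plain dicts rather than _ActionPlan objects:

    def consolidate(removals, installs):
        # removals/installs: lists of {"path": ...} action stand-ins. Any
        # path present on both sides becomes one update; the rest remain
        # ordinary removals or installs.
        pending = {r["path"]: r for r in removals}
        updates, plain_installs = [], []
        for inst in installs:
            old = pending.pop(inst["path"], None)
            if old is not None:
                updates.append((old, inst))
            else:
                plain_installs.append(inst)
        return list(pending.values()), plain_installs, updates

    rem, inst, upd = consolidate(
        [{"path": "etc/motd"}, {"path": "usr/bin/foo"}],
        [{"path": "etc/motd"}],
    )
    print(len(rem), len(inst), len(upd))  # 1 0 1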
+ nu_src = manifest.Manifest() + nu_src.set_content( + content=(a[0] for a in new_updates), excludes=self.__old_excludes + ) + nu_dst = manifest.Manifest() + pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) + nu_dst.set_content( + content=(a[1] for a in new_updates), excludes=self.__new_excludes + ) + del new_updates + pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) + nu_add, nu_chg, nu_rem = nu_dst.difference( + nu_src, self.__old_excludes, self.__new_excludes + ) + pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) + # All the differences should be updates + assert not nu_add + assert not nu_rem + del nu_src, nu_dst + + # Extend update_actions with the new tuples. The package plan + # is the one associated with the action getting installed. + self.pd.update_actions.extend( + [ + _ActionPlan(dest_pkgplans[id(dst)], src, dst) + for src, dst in nu_chg + ] + ) + + del dest_pkgplans, nu_chg + + # Cull any update actions that are excluded by the exclusion + # patterns configured in the image. + for i, ap in enumerate(self.pd.update_actions): + if ap is None: + continue + path = None + if ( + ap.src + and "path" in ap.src.attrs + and self.__check_excluded(ap.src.attrs["path"]) + ): + path = ap.src.attrs["path"] + elif ( + ap.dst + and "path" in ap.dst.attrs + and self.__check_excluded(ap.dst.attrs["path"]) + ): + path = ap.dst.attrs["path"] + + if path: + if DebugValues["exclude"]: + print("!Update", path) + self.pd.elided_actions.append(ap) + self.pd.update_actions[i] = None + + pt.plan_done(pt.PLAN_ACTION_CONSOLIDATE) + pt.plan_start(pt.PLAN_ACTION_MEDIATION) + pt.plan_add_progress(pt.PLAN_ACTION_MEDIATION) + + # Mediate and repair links affected by the plan. + prop_mediators = self.__mediate_links(mediated_removed_paths) + + pt.plan_add_progress(pt.PLAN_ACTION_MEDIATION) + for prop in ("removal_actions", "install_actions", "update_actions"): + pval = getattr(self.pd, prop) + pval[:] = [a for a in pval if a is not None] + + pt.plan_add_progress(pt.PLAN_ACTION_MEDIATION) + + # Add any necessary repairs to plan. + self.__evaluate_fixups() + + pt.plan_add_progress(pt.PLAN_ACTION_MEDIATION) + + # Finalize link mediation. + self.__finalize_mediation(prop_mediators, mediated_del_path_target) + + pt.plan_done(pt.PLAN_ACTION_MEDIATION) + + if DebugValues["actions"]: + print("--- Final actions ---") + for prop in ( + "removal_actions", + "install_actions", + "update_actions", + ): + key = prop.split("_")[0] + for a in getattr(self.pd, prop): + print("{0} {1}".format(key, str(a))) + + pt.plan_start(pt.PLAN_ACTION_FINALIZE) + + # Go over update actions + l_refresh = [] + l_actions = {} + if self.pd.update_actions: + # iterating over actions is slow, so don't do it + # unless we have to. + l_actions = self.get_actions("hardlink", self.hardlink_keyfunc) + for a in self.pd.update_actions: + # For any files being updated that are the target of + # _any_ hardlink actions, append the hardlink actions + # to the update list so that they are not broken. + # Since we reference count hardlinks, update each one + # only once. 
+ if a[2].name == "file": + path = a[2].attrs["path"] + if path in l_actions: + unique_links = dict( + (l.attrs["path"], l) for l in l_actions[path] + ) + l_refresh.extend( + [_ActionPlan(a[0], l, l) for l in unique_links.values()] + ) + path = None + + # scan both old and new actions + # repairs may result in update action w/o orig action + self.pd._actuators.scan_update(a) + if self.pd._need_boot_archive is None: + if a[2].attrs.get("path", "").startswith(ramdisk_prefixes): + self.pd._need_boot_archive = True + + self.pd.update_actions.extend(l_refresh) + + # sort actions to match needed processing order + remsort = operator.itemgetter(1) + addsort = operator.itemgetter(2) + self.pd.removal_actions.sort(key=remsort, reverse=True) + self.pd.update_actions.sort(key=addsort) + self.pd.install_actions.sort(key=addsort) + + # find the first and last hardlink in the install_actions + fhl = lhl = -1 + for i, ap in enumerate(self.pd.install_actions): + if ap.dst.name == "hardlink": + if fhl == -1: + fhl = i + lhl = i + elif fhl != -1: + break + + # now reorder the hardlinks to respect inter-dependencies + if fhl != -1: + hardlinks = self.pd.install_actions[fhl : lhl + 1] + hardlinks = _reorder_hardlinks(hardlinks) + self.pd.install_actions[fhl : lhl + 1] = hardlinks + + # cleanup pkg_plan objects which don't actually contain any + # changes and add any new ones to list of changes + for p in list(self.pd.pkg_plans): + if ( + p.origin_fmri != p.destination_fmri + or p.actions.removed + or p.actions.changed + or p.actions.added + ): + pair = (p.origin_fmri, p.destination_fmri) + if pair not in self.pd._fmri_changes: + self.pd._fmri_changes.append(pair) + continue + self.pd.pkg_plans.remove(p) + fmri = p.origin_fmri + if (fmri, fmri) in self.pd._fmri_changes: + self.pd._fmri_changes.remove((fmri, fmri)) + del p + + # + # Sort the package plans by fmri to create predictability (and + # some sense of order) in the download output; this is not + # a perfect sort of this, but we only really care for things + # we fetch over the wire. + # + def key_func(a): + if a.destination_fmri: + return a.destination_fmri + return "" + + self.pd.pkg_plans.sort(key=key_func) + + pt.plan_done(pt.PLAN_ACTION_FINALIZE) + + if self.pd._need_boot_archive is None: + self.pd._need_boot_archive = False + + self.pd.state = plandesc.EVALUATED_OK + + def nothingtodo(self): + """Test whether this image plan contains any work to do""" + + if self.pd.state in [plandesc.EVALUATED_PKGS, plandesc.MERGED_OK]: + return not ( + self.pd._fmri_changes + or self.pd._new_variants + or (self.pd._new_facets is not None) + or self.pd._mediators_change + or self.pd.pkg_plans + ) + elif self.pd.state >= plandesc.EVALUATED_OK: + return not ( + self.pd.pkg_plans + or self.pd._new_variants + or (self.pd._new_facets is not None) + or self.pd._mediators_change + ) + assert 0, "Shouldn't call nothingtodo() for state = {0:d}".format( + self.pd.state + ) + + def preexecute(self): + """Invoke the evaluated image plan + preexecute, execute and postexecute + execute actions need to be sorted across packages + """ + + assert self.pd.state == plandesc.EVALUATED_OK + + if self.pd._image_lm != self.image.get_last_modified(string=True): + # State has been modified since plan was created; this + # plan is no longer valid. 
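Two hardlink details handled above are easy to miss: links whose target file is being rewritten are re-applied so they do not point at stale data, and hardlink installs are reordered so a link is only created after the path it refers to exists. A small sketch of the refresh step, with l_actions standing in for the get_actions("hardlink", ...) lookup:

    # file path -> hardlink actions that point at it (hypothetical data)
    l_actions = {
        "usr/bin/bzip2": [{"path": "usr/bin/bunzip2"}, {"path": "usr/bin/bzcat"}],
    }

    def refresh_links(updated_file_paths):
        # Emit one refresh per distinct link path whose target is updated,
        # so reference-counted hardlinks are each touched only once.
        refresh = []
        for path in updated_file_paths:
            unique = {l["path"]: l for l in l_actions.get(path, [])}
            refresh.extend(unique.values())
        return refresh

    print([l["path"] for l in refresh_links(["usr/bin/bzip2"])])
    # ['usr/bin/bunzip2', 'usr/bin/bzcat']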
+ self.pd.state = plandesc.PREEXECUTED_ERROR + raise api_errors.InvalidPlanError() + + if self.nothingtodo(): + self.pd.state = plandesc.PREEXECUTED_OK + return + + if self.image.version != self.image.CURRENT_VERSION: + # Prevent plan execution if image format isn't current. + raise api_errors.ImageFormatUpdateNeeded(self.image.root) + + if DebugValues["plandesc_validate"]: + # get a json copy of the plan description so that + # later we can verify that it wasn't updated during + # the pre-execution stage. + pd_json1 = self.pd.getstate(self.pd, reset_volatiles=True) + + # Checks the index to make sure it exists and is + # consistent. If it's inconsistent an exception is thrown. + # If it's totally absent, it will index the existing packages + # so that the incremental update that follows at the end of + # the function will work correctly. It also repairs the index + # for this BE so the user can boot into this BE and have a + # correct index. + if self.update_index: + ind = None + try: + self.image.update_index_dir() + ind = indexer.Indexer( + self.image, + self.image.get_manifest, + self.image.get_manifest_path, + progtrack=self.__progtrack, + excludes=self.__old_excludes, + ) + if ind.check_index_existence(): + try: + ind.check_index_has_exactly_fmris( + self.image.gen_installed_pkg_names() + ) + except se.IncorrectIndexFileHash as e: + self.__preexecuted_indexing_error = ( + api_errors.WrapSuccessfulIndexingException( + e, + traceback.format_exc(), + traceback.format_stack(), + ) + ) + ind.rebuild_index_from_scratch( + self.image.gen_installed_pkgs() + ) + except se.IndexingException as e: + # If there's a problem indexing, we want to + # attempt to finish the installation anyway. If + # there's a problem updating the index on the + # new image, that error needs to be + # communicated to the user. + self.__preexecuted_indexing_error = ( + api_errors.WrapSuccessfulIndexingException( + e, traceback.format_exc(), traceback.format_stack() + ) + ) + + # No longer needed. + del ind + + # check if we're going to have enough room + # stat fs again just in case someone else is using space... + self.__update_avail_space() + if self.pd._cbytes_added > self.pd._cbytes_avail: + raise api_errors.ImageInsufficentSpace( + self.pd._cbytes_added, + self.pd._cbytes_avail, + _("Download cache"), + ) + if self.pd._bytes_added > self.pd._bytes_avail: + raise api_errors.ImageInsufficentSpace( + self.pd._bytes_added, self.pd._bytes_avail, _("Root filesystem") + ) + + # Remove history about manifest/catalog transactions. This + # helps the stats engine by only considering the performance of + # bulk downloads. + self.image.transport.stats.reset() + + # + # Calculate size of data retrieval and pass it to progress + # tracker. + # + npkgs = nfiles = nbytes = 0 + for p in self.pd.pkg_plans: + nf, nb = p.get_xferstats() + nbytes += nb + nfiles += nf + + # It's not perfectly accurate but we count a download + # even if the package will do zero data transfer. This + # makes the pkg stats consistent between download and + # install. + npkgs += 1 + self.__progtrack.download_set_goal(npkgs, nfiles, nbytes) + + lic_errors = [] + try: + # Check for license acceptance issues first to avoid + # wasted time in the download phase and so failure + # can occur early. + for p in self.pd.pkg_plans: + try: + p.preexecute() + except api_errors.PkgLicenseErrors as e: + # Accumulate all license errors. 
+ lic_errors.append(e) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + raise - pt.plan_start(pt.PLAN_ACTION_MERGE) - # cache the current image mediators within the plan - cfg_mediators = self.pd._cfg_mediators = \ - self.image.cfg.mediators + if lic_errors: + raise api_errors.PlanLicenseErrors(lic_errors) - mediated_removed_paths = set() - mediated_del_path_target = defaultdict(set) + try: for p in self.pd.pkg_plans: - pt.plan_add_progress(pt.PLAN_ACTION_MERGE) - for src, dest in p.gen_removal_actions(): - if DebugValues["actions"]: - print("Removal: " + str(src)) - if src.name == "user": - self.pd.removed_users[src.attrs[ - "username"]] = p.origin_fmri - elif src.name == "group": - self.pd.removed_groups[src.attrs[ - "groupname"]] = p.origin_fmri - - self.pd.removal_actions.append( - _ActionPlan(p, src, dest)) - if (not (src.name == "link" or - src.name == "hardlink") or - "mediator" not in src.attrs): - continue - - # Keep track of which mediated paths have been - # removed from the system so that which paths - # need to be repaired can be determined. - mediator = src.attrs["mediator"] - src_version = src.attrs.get( - "mediator-version") - src_impl = src.attrs.get( - "mediator-implementation") - - mediation = cfg_mediators.get(mediator) - if not mediation: - # Shouldn't happen, but if it does, - # drive on. - continue - - cfg_version = mediation.get("version") - if cfg_version: - # For comparison, version must be a - # string. - cfg_version = \ - cfg_version.get_short_version() - - cfg_version_source = mediation.get("version-source") - cfg_impl = mediation.get("implementation") - cfg_impl_source = mediation.get("implementation-source") - - if src_version == cfg_version and \ - mediator_impl_matches(src_impl, cfg_impl): - mediated_removed_paths.add( - src.attrs["path"]) - # Need the target to be a full path - # so it can be found in the plan. - if cfg_version_source == "local" or \ - cfg_impl_source == "local": - binpath = src.attrs["path"] - target = self.__full_target_path(binpath) - mediated_del_path_target[mediator].add(target) + p.download(self.__progtrack, self.__check_cancel) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + raise + except ( + api_errors.InvalidDepotResponseException, + api_errors.TransportError, + ) as e: + if p and p._autofix_pkgs: + e._autofix_pkgs = p._autofix_pkgs + raise + + self.image.transport.shutdown() + self.__progtrack.download_done() + except: + self.pd.state = plandesc.PREEXECUTED_ERROR + raise + + self.pd.state = plandesc.PREEXECUTED_OK + + if DebugValues["plandesc_validate"]: + # verify that preexecution did not update the plan + pd_json2 = self.pd.getstate(self.pd, reset_volatiles=True) + pkg.misc.json_diff( + "PlanDescription", pd_json1, pd_json2, pd_json1, pd_json2 + ) + del pd_json1, pd_json2 + + def execute(self): + """Invoke the evaluated image plan + preexecute, execute and postexecute + execute actions need to be sorted across packages + """ + assert self.pd.state == plandesc.PREEXECUTED_OK + + if self.pd._image_lm != self.image.get_last_modified(string=True): + # State has been modified since plan was created; this + # plan is no longer valid. 
+ self.pd.state = plandesc.EXECUTED_ERROR + raise api_errors.InvalidPlanError() + + # load data from previously downloaded actions + try: + for p in self.pd.pkg_plans: + p.cacheload() + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + raise + + # check for available space + self.__update_avail_space() + if ( + self.pd._bytes_added - self.pd._cbytes_added + ) > self.pd._bytes_avail: + raise api_errors.ImageInsufficentSpace( + self.pd._bytes_added - self.pd._cbytes_added, + self.pd._bytes_avail, + _("Root filesystem"), + ) + + # + # what determines execution order? + # + # The following constraints are key in understanding imageplan + # execution: + # + # 1) All non-directory actions (files, users, hardlinks, + # symbolic links, etc.) must appear in only a single installed + # package. + # + # 2) All installed packages must be consistent in their view of + # action types; if /usr/openwin is a directory in one package, + # it must be a directory in all packages, never a symbolic link; + # this includes implicitly defined directories. + # + # A key goal in IPS is to be able to undergo an arbitrary + # transformation in package contents in a single step. Packages + # must be able to exchange files, convert directories to + # symbolic links, etc.; so long as the start and end states meet + # the above two constraints IPS must be able to transition + # between the states directly. This leads to the following: + # + # 1) All actions must be ordered across packages; packages + # cannot be updated one at a time. + # + # This is readily apparent when one considers two packages + # exchanging files in their new versions; in each case the + # package now owning the file must be installed last, but it + # is not possible for each package to be installed before the + # other. Clearly, all the removals must be done first, + # followed by the installs and updates. + # + # 2) Installs of new actions must precede updates of existing + # ones. + # + # In order to accommodate changes of file ownership of + # existing files to a newly created user, it is necessary + # for the installation of that user to precede the update of + # files to reflect their new ownership. + # + # The exception to this rule is driver actions. Aliases of + # existing drivers which are going to be removed must be + # removed before any new drivers are installed or updated. + # This prevents an error if an alias is moving from one + # driver to another. + + if self.nothingtodo(): + self.pd.state = plandesc.EXECUTED_OK + return + + pt = self.__progtrack + pt.set_major_phase(pt.PHASE_EXECUTE) + + # It's necessary to do this check here because the state of the + # image before the current operation is performed is desired. + empty_image = self.__is_image_empty() + + if not empty_image: + # Before proceeding, remove fast lookups database so + # that if _create_fast_lookups is interrupted later the + # client isn't left with invalid state. + self.image._remove_fast_lookups() + + if not self.image.is_liveroot(): + # Check if the child is a running zone. If so run the + # actuator in the zone. + + # Linked Image code uses trailing slashes, Image code + # does not. So we make sure that our path comparisons + # are always on tha same page. 
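The ordering rules laid out in the comment block above reduce to a fixed phase sequence: all removals first, then installs, then updates, with retryable failures tried once more at the end of their phase (mirroring the install and update handling that follows). The skeleton below only illustrates that sequencing; the real loop also drives actuators, progress tracking and driver-alias cleanup.

    def run_phase(actions, execute, retryable=()):
        # Execute one phase; actions that raise a retryable error are queued
        # and attempted once more after the rest of the phase.
        retries = []
        for pkg_plan, src, dest in actions:
            try:
                execute(pkg_plan, src, dest)
            except retryable:
                retries.append((pkg_plan, src, dest))
        for pkg_plan, src, dest in retries:
            execute(pkg_plan, src, dest)

    def run_all(removals, installs, updates, execute, retryable=()):
        run_phase(removals, execute)             # removals first
        run_phase(installs, execute, retryable)  # installs before updates
        run_phase(updates, execute, retryable)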
+ root = os.path.normpath(self.image.root) + + rzones = zone.list_running_zones() + for z, path in six.iteritems(rzones): + if os.path.normpath(path) == root: + self.pd._actuators.set_zone(z) + # there should be only on zone per path + break + + self.pd._actuators.exec_prep(self.image) + + self.pd._actuators.exec_pre_actuators(self.image) + + # List of tuples of (src, dest) used to track each pkgplan so + # that it can be discarded after execution. + executed_pp = [] + try: + try: + pt.actions_set_goal( + pt.ACTION_REMOVE, len(self.pd.removal_actions) + ) + pt.actions_set_goal( + pt.ACTION_INSTALL, len(self.pd.install_actions) + ) + pt.actions_set_goal( + pt.ACTION_UPDATE, len(self.pd.update_actions) + ) - self.pd.update_actions = [] - self.pd._rm_aliases = {} - for p in self.pd.pkg_plans: - pt.plan_add_progress(pt.PLAN_ACTION_MERGE) - for src, dest in p.gen_update_actions(): - if DebugValues["actions"]: - print("Update:" + str(src)) - print(" " + str(dest)) - if dest.name == "user": - self.pd.added_users[dest.attrs[ - "username"]] = p.destination_fmri - elif dest.name == "group": - self.pd.added_groups[dest.attrs[ - "groupname"]] = p.destination_fmri - elif dest.name == "driver" and src: - rm = \ - set(src.attrlist("alias")) - \ - set(dest.attrlist("alias")) - if rm: - self.pd._rm_aliases.setdefault( - dest.attrs["name"], - set()).update(rm) - self.pd.update_actions.append( - _ActionPlan(p, src, dest)) + # execute removals + for p, src, dest in self.pd.removal_actions: + p.execute_removal(src, dest) + pt.actions_add_progress(pt.ACTION_REMOVE) + pt.actions_done(pt.ACTION_REMOVE) + + # Update driver alias database to reflect the + # aliases drivers have lost in the new image. + # This prevents two drivers from ever attempting + # to have the same alias at the same time. + for name, aliases in six.iteritems(self.pd._rm_aliases): + driver.DriverAction.remove_aliases( + name, aliases, self.image + ) + + # Done with removals; discard them so memory can + # be re-used. + self.pd.removal_actions = [] + # execute installs; if action throws a retry + # exception try it again afterwards. + retries = [] + for p, src, dest in self.pd.install_actions: + try: + p.execute_install(src, dest) + pt.actions_add_progress(pt.ACTION_INSTALL) + except pkg.actions.ActionRetry: + retries.append((p, src, dest)) + for p, src, dest in retries: + p.execute_retry(src, dest) + pt.actions_add_progress(pt.ACTION_INSTALL) + retries = [] + pt.actions_done(pt.ACTION_INSTALL) + + # Done with installs, so discard them so memory + # can be re-used. self.pd.install_actions = [] - errs = [] - for p in self.pd.pkg_plans: - pfmri = None - if p.destination_fmri: - pfmri = p.destination_fmri.get_fmri() - err_actions = api_errors.ImageBoundaryError(pfmri) - pt.plan_add_progress(pt.PLAN_ACTION_MERGE) - for src, dest in p.gen_install_actions(): - if DebugValues["actions"]: - print("Install: " + str(dest)) - if dest.name == "user": - self.pd.added_users[dest.attrs[ - "username"]] = p.destination_fmri - elif dest.name == "group": - self.pd.added_groups[dest.attrs[ - "groupname"]] = p.destination_fmri - # Check whether files are delivered in reserved - # locations. - if not self.__check_reserved(dest): - err_actions.append_error( - action=dest, - err_type=api_errors.\ - ImageBoundaryError.RESERVED) - self.pd.install_actions.append( - _ActionPlan(p, src, dest)) - if not err_actions.isEmpty(): - errs.append(err_actions) - - if errs: - raise api_errors.ImageBoundaryErrors(errs) - - # In case a removed user or group was added back... 
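One step above deserves a gloss: aliases that updated drivers no longer claim are dropped from the alias database after the removals but before any installs, so an alias can migrate from one driver to another without two drivers ever holding it at once. A sketch of that bookkeeping, with a plain dict standing in for the driver alias database:

    def release_aliases(alias_db, rm_aliases):
        # alias_db: alias -> owning driver; rm_aliases: driver -> aliases it
        # no longer claims (the shape of self.pd._rm_aliases).
        for drv, aliases in rm_aliases.items():
            for alias in aliases:
                if alias_db.get(alias) == drv:
                    del alias_db[alias]
        return alias_db

    db = {"pci8086,1234": "olddrv"}
    print(release_aliases(db, {"olddrv": {"pci8086,1234"}}))  # {}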
- for entry in self.pd.added_groups.keys(): - if entry in self.pd.removed_groups: - del self.pd.removed_groups[entry] - for entry in self.pd.added_users.keys(): - if entry in self.pd.removed_users: - del self.pd.removed_users[entry] - - self.pd.state = plandesc.MERGED_OK - pt.plan_done(pt.PLAN_ACTION_MERGE) - - if not self.nothingtodo(): - self.__find_all_conflicts() - - pt.plan_start(pt.PLAN_ACTION_CONSOLIDATE) - ConsolidationEntry = namedtuple("ConsolidationEntry", "idx id") - - # cons_named maps original_name tags to the index into - # removal_actions so we can retrieve them later. cons_generic - # maps the (action.name, action.key-attribute-value) tuple to - # the same thing. The reason for both is that cons_named allows - # us to deal with files which change their path as well as their - # package, while cons_generic doesn't require the "receiving" - # package to have marked the file in any special way, plus - # obviously it handles all actions even if they don't have - # paths. - cons_named = {} - cons_generic = {} - - def hashify(v): - """handle key values that may be lists""" - if isinstance(v, list): - return frozenset(v) - else: - return v - - for i, ap in enumerate(self.pd.removal_actions): - if ap is None: - continue - pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) - - # If the action type needs to be reference-counted, make - # sure it doesn't get removed if another instance - # remains in the target image. - remove = True - if ap.src.name == "dir" and \ - os.path.normpath(ap.src.attrs["path"]) in \ - self.__get_directories(): - remove = False - elif ap.src.name == "link" or ap.src.name == "hardlink": - lpath = os.path.normpath(ap.src.attrs["path"]) - if ap.src.name == "link": - inst_links = self.__get_symlinks() - else: - inst_links = self.__get_hardlinks() - if self.__check_excluded(lpath): - if DebugValues["exclude"]: - print("!Removal:", lpath) - self.pd.elided_actions.append(ap) - remove = False - elif lpath in inst_links: - # Another link delivers to the same - # location, so assume it can't be - # safely removed initially. - remove = False - - # If link is mediated, and the mediator - # doesn't match the new mediation - # criteria, it is safe to remove. - mediator = ap.src.attrs.get("mediator") - if mediator in self.pd._new_mediators: - src_version = ap.src.attrs.get( - "mediator-version") - src_impl = ap.src.attrs.get( - "mediator-implementation") - dest_version = \ - self.pd._new_mediators[mediator].get( - "version") - if dest_version: - # Requested version needs - # to be a string for - # comparison. 
- dest_version = \ - dest_version.get_short_version() - dest_impl = \ - self.pd._new_mediators[mediator].get( - "implementation") - if dest_version is not None and \ - src_version != dest_version: - remove = True - if dest_impl is not None and \ - not mediator_impl_matches( - src_impl, dest_impl): - remove = True - - elif ap.src.name == "license" and \ - ap.src.attrs["license"] in self.__get_licenses(): - remove = False - elif ap.src.name == "legacy" and \ - ap.src.attrs["pkg"] in self.__get_legacy(): - remove = False - elif "path" in ap.src.attrs and \ - self.__check_excluded(ap.src.attrs["path"]): - if DebugValues["exclude"]: - print("!Remove", ap.src.attrs["path"]) - self.pd.elided_actions.append(ap) - remove = False - - if not remove: - self.pd.removal_actions[i] = None - if "mediator" in ap.src.attrs: - mediated_removed_paths.discard( - ap.src.attrs["path"]) - continue - # store names of files being removed under own name - # or original name if specified - if ap.src.globally_identical: - attrs = ap.src.attrs - # Store the index into removal_actions and the - # id of the action object in that slot. - re = ConsolidationEntry(i, id(ap.src)) - cons_generic[(ap.src.name, - hashify(attrs[ap.src.key_attr]))] = re - if ap.src.name == "file": - fname = attrs.get("original_name", - "{0}:{1}".format( - ap.p.origin_fmri.get_name(), - attrs["path"])) - cons_named[fname] = re - fname = None - attrs = re = None - - self.pd._actuators.scan_removal(ap) - if self.pd._need_boot_archive is None: - if self.pd._op != PKG_OP_DEHYDRATE and \ - ap.src.attrs.get("path", "").startswith( - ramdisk_prefixes): - self.pd._need_boot_archive = True - - # reduce memory consumption - self.__directories = None - self.__symlinks = None - self.__hardlinks = None - self.__licenses = None - self.__legacy = None - - pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) - - # Construct a mapping from the install actions in a pkgplan to - # the position they have in the plan's list. This allows us to - # remove them efficiently later, if they've been consolidated. - # - # NOTE: This means that the action ordering in the package plans - # must remain fixed, at least for the duration of the imageplan - # evaluation. - plan_pos = {} - for p in self.pd.pkg_plans: - for i, a in enumerate(p.gen_install_actions()): - plan_pos[id(a[1])] = i + # execute updates; in some cases there may be + # a retryable exception, so capture those and + # retry after running through all the + # actions(which might address the reason for + # the retryable exception). + # An example is a user action that depends + # upon a file existing (ie ftpusers). + retries = [] + for p, src, dest in self.pd.update_actions: + try: + p.execute_update(src, dest) + pt.actions_add_progress(pt.ACTION_UPDATE) + except pkg.actions.ActionRetry: + retries.append((p, src, dest)) + + for p, src, dest in retries: + p.execute_retry(src, dest) + pt.actions_add_progress(pt.ACTION_UPDATE) + retries = [] + + pt.actions_done(pt.ACTION_UPDATE) + pt.actions_all_done() + pt.set_major_phase(pt.PHASE_FINALIZE) + + # Done with updates, so discard them so memory + # can be re-used. + self.pd.update_actions = [] + + # handle any postexecute operations + while self.pd.pkg_plans: + # postexecute in reverse, but pkg_plans + # aren't ordered, so does it matter? + # This allows the pkgplan objects to be + # discarded as they're executed which + # allows memory to be-reused sooner. 
+ p = self.pd.pkg_plans.pop() + p.postexecute() + executed_pp.append((p.destination_fmri, p.origin_fmri)) + p = None + + # save package state + self.image.update_pkg_installed_state( + executed_pp, self.__progtrack, self.__match_inst.keys() + ) + # no longer needed + self.__match_inst = {} + + # write out variant changes to the image config + if self.pd._varcets_change or self.pd._mediators_change: + self.image.image_config_update( + self.pd._new_variants, + self.pd._new_facets, + self.pd._new_mediators, + ) + # write out any changes + self.image._avoid_set_save(*self.pd._new_avoid_obs) + # An essential step to set the property + # "dehydrated" if dehydrate/rehydrate succeeds. + if self.pd._op in (PKG_OP_DEHYDRATE, PKG_OP_REHYDRATE): + self.image.cfg.set_property( + "property", "dehydrated", self.operations_pubs + ) + self.image.save_config() + else: + # Mark image as modified if not calling + # save_config (which will do it for us). + self.image.update_last_modified() + + except EnvironmentError as e: + if e.errno == errno.EACCES or e.errno == errno.EPERM: + raise api_errors.PermissionsException(e.filename) + elif e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + elif e.errno == errno.ELOOP: + act = pkg.actions.unknown.UnknownAction() + raise api_errors.ActionExecutionError( + act, + _( + "A link targeting itself or " + "part of a link loop was found at " + "'{0}'; a file or directory was " + "expected. Please remove the link " + "and try again." + ).format(e.filename), + ) + raise + except pkg.actions.ActionError: + exc_type, exc_value, exc_tb = sys.exc_info() + self.pd.state = plandesc.EXECUTED_ERROR + try: + self.pd._actuators.exec_fail_actuators(self.image) + except: + # Ensure the real cause of failure is raised. + pass + if six.PY2: + six.reraise( + api_errors.InvalidPackageErrors([exc_value]), None, exc_tb + ) + else: + # six.reraise requires the first argument + # callable if the second argument is None. + # Also the traceback is automatically attached, + # in Python 3, so we can simply raise it. + raise api_errors.InvalidPackageErrors([exc_value]) + except: + exc_type, exc_value, exc_tb = sys.exc_info() + self.pd.state = plandesc.EXECUTED_ERROR + try: + self.pd._actuators.exec_fail_actuators(self.image) + finally: + # This ensures that the original exception and + # traceback are used if exec_fail_actuators + # fails. + if six.PY2: + six.reraise(exc_value, None, exc_tb) + else: + raise exc_value + + else: + self.pd._actuators.exec_post_actuators(self.image) + + self.image._create_fast_lookups(progtrack=self.__progtrack) + self.__save_release_notes() + + # success + self.pd.state = plandesc.EXECUTED_OK + self.pd._executed_ok() + + # reduce memory consumption + self.saved_files = {} + self.valid_directories = set() + self.__cached_actions = {} + + # Clear out the primordial user and group caches. 
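The exception handling above follows one convention throughout: raw EnvironmentError values are translated into the client API's exception types (permission, read-only filesystem, link loop) before they reach the caller. A condensed sketch of that translation, with local classes standing in for the api_errors types:

    import errno

    class PermissionsError(Exception): ...          # stand-in for api_errors.PermissionsException
    class ReadOnlyFilesystemError(Exception): ...   # stand-in for api_errors.ReadOnlyFileSystemException

    def translate(exc):
        # Map OS-level errnos onto client-facing exceptions; anything not
        # recognised here is re-raised unchanged by the caller.
        if exc.errno in (errno.EACCES, errno.EPERM):
            return PermissionsError(exc.filename)
        if exc.errno == errno.EROFS:
            return ReadOnlyFilesystemError(exc.filename)
        return exc

    err = PermissionError(errno.EACCES, "Permission denied", "/var/pkg/lock")
    print(type(translate(err)).__name__)  # PermissionsError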
+ self.image._users = set() + self.image._groups = set() + self.image._usersbyname = {} + self.image._groupsbyname = {} + + # Perform the incremental update to the search indexes + # for all changed packages + if self.update_index: + self.image.update_index_dir() + ind = indexer.Indexer( + self.image, + self.image.get_manifest, + self.image.get_manifest_path, + progtrack=self.__progtrack, + excludes=self.__new_excludes, + ) + try: + if empty_image: + ind.setup() + if empty_image or ind.check_index_existence(): + ind.client_update_index(([], executed_pp), self.image) + except KeyboardInterrupt: + raise + except se.ProblematicPermissionsIndexException: + # ProblematicPermissionsIndexException + # is included here as there's little + # chance that trying again will fix this + # problem. + raise api_errors.WrapIndexingException( + e, traceback.format_exc(), traceback.format_stack() + ) + except Exception as e: + # It's important to delete and rebuild + # from scratch rather than using the + # existing indexer because otherwise the + # state will become confused. + del ind + # XXX Once we have a framework for + # emitting a message to the user in this + # spot in the code, we should tell them + # something has gone wrong so that we + # continue to get feedback to allow + # us to debug the code. + try: + ind = indexer.Indexer( + self.image, + self.image.get_manifest, + self.image.get_manifest_path, + progtrack=self.__progtrack, + excludes=self.__new_excludes, + ) + ind.rebuild_index_from_scratch( + self.image.gen_installed_pkgs() + ) + except Exception as e: + raise api_errors.WrapIndexingException( + e, traceback.format_exc(), traceback.format_stack() + ) + raise api_errors.WrapSuccessfulIndexingException( + e, traceback.format_exc(), traceback.format_stack() + ) + if self.__preexecuted_indexing_error is not None: + raise self.__preexecuted_indexing_error + + # As the very last thing, check if there are any broken + # mediators. + if self.invalid_meds and not self.image.is_zone(): + medmsg = self.__make_med_msg() + raise api_errors.InvalidMediatorTarget(medmsg) + + def __is_image_empty(self): + try: + next(self.image.gen_installed_pkg_names()) + return False + except StopIteration: + return True + + @staticmethod + def match_user_stems( + image, + patterns, + match_type, + raise_unmatched=True, + raise_not_installed=True, + return_matchdict=False, + universe=None, + ): + """Given a user specified list of patterns, return a set + of matching package stems. Any versions specified are + ignored. + + 'match_type' indicates how matching should be restricted. The + possible values are: + + MATCH_ALL + Matching is performed using all known package stems. + + MATCH_INST_VERSIONS + Matching is performed using only installed package + stems. + + MATCH_UNINSTALLED + Matching is performed using uninstalled packages; + it is an error for a pattern to match an installed + package. + + Note that patterns starting w/ pkg:/ require an exact match; + patterns containing '*' will using fnmatch rules; the default + trailing match rules are used for remaining patterns. + + Exactly duplicated patterns are ignored. + + Routine raises PlanCreationException if errors occur: it is + illegal to specify multiple different patterns that match the + same pkg name. Only patterns that contain wildcards are allowed + to match multiple packages. + + 'raise_unmatched' determines whether an exception will be + raised if any patterns didn't match any packages. 
+ + 'raise_not_installed' determines whether an exception will be + raised if any pattern matches a package that's not installed. + + 'return_matchdict' determines whether the dictionary containing + which patterns matched which stems or the list of stems is + returned. + + 'universe' contains a list of tuples of publishers and package + names against which the patterns should be matched. + """ + # avoid checking everywhere + if not patterns: + return set() + + illegals = [] + nonmatch = [] + multimatch = [] + not_installed = [] + multispec = [] + already_installed = [] + + matchers = [] + fmris = [] + pubs = [] + + wildcard_patterns = set() + + # ignore dups + patterns = list(set(patterns)) + + # figure out which kind of matching rules to employ + seen = set() + npatterns = [] + for pat in patterns: + try: + parts = pat.split("@", 1) + pat_stem = parts[0] + + if "*" in pat_stem or "?" in pat_stem: + matcher = pkg.fmri.glob_match + wildcard_patterns.add(pat) + elif pat_stem.startswith("pkg:/") or pat_stem.startswith("/"): + matcher = pkg.fmri.exact_name_match + else: + matcher = pkg.fmri.fmri_match - # This keeps track of which pkgplans have had install actions - # consolidated away. - pp_needs_trimming = set() + if matcher == pkg.fmri.glob_match: + fmri = pkg.fmri.MatchingPkgFmri(pat_stem) + else: + fmri = pkg.fmri.PkgFmri(pat_stem) + + sfmri = str(fmri) + if sfmri in seen: + # A different form of the same pattern + # was specified already; ignore this + # one (e.g. pkg:/network/ping, + # /network/ping). + wildcard_patterns.discard(pat) + continue + + seen.add(sfmri) + npatterns.append(pat) + matchers.append(matcher) + pubs.append(fmri.publisher) + fmris.append(fmri) + except pkg.fmri.FmriError as e: + illegals.append(e) + patterns = npatterns + del npatterns, seen + + # Create a dictionary of patterns, with each value being a + # set of pkg names that match that pattern. 
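# Illustrative sketch, not from the original source: an approximation of the
# three matcher styles selected above (pkg.fmri.glob_match, exact_name_match
# and fmri_match), matching a catalog stem against fmri.pkg_name.  Standard
# library only; the package names are examples.
import fnmatch

def glob_match(name, pattern):        # pattern contained '*' or '?'
    return fnmatch.fnmatchcase(name, pattern)

def exact_name_match(name, pattern):  # pattern was written as pkg:/... or /...
    return name == pattern

def fmri_match(name, pattern):        # default rule: match trailing components
    return ("/" + name).endswith("/" + pattern)

assert glob_match("network/ping", "network/*")
assert exact_name_match("network/ping", "network/ping")
assert fmri_match("network/ping", "ping")
assert not fmri_match("network/ping", "ing")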
+ ret = dict(zip(patterns, [set() for i in patterns])) + + if universe is not None: + assert match_type == ImagePlan.MATCH_ALL + pkg_names = universe + else: + if match_type != ImagePlan.MATCH_INST_VERSIONS: + cat = image.get_catalog(image.IMG_CATALOG_KNOWN) + else: + cat = image.get_catalog(image.IMG_CATALOG_INSTALLED) + pkg_names = cat.pkg_names() + + # construct matches for each pattern + for pkg_pub, name in pkg_names: + for pat, matcher, fmri, pub in zip(patterns, matchers, fmris, pubs): + if pub and pkg_pub != pub: + continue + if matcher(name, fmri.pkg_name): + ret[pat].add(name) + + matchdict = {} + for p in patterns: + l = len(ret[p]) + if l == 0: # no matches at all + nonmatch.append(p) + elif l > 1 and p not in wildcard_patterns: + # multiple matches + multimatch.append((p, list(ret[p]))) + else: + # single match or wildcard + for k in ret[p]: + # for each matching package name + matchdict.setdefault(k, []).append(p) + + for name in matchdict: + if len(matchdict[name]) > 1: + # different pats, same pkg + multispec.append(tuple([name] + matchdict[name])) + + if match_type == ImagePlan.MATCH_INST_VERSIONS: + not_installed, nonmatch = nonmatch, not_installed + elif match_type == ImagePlan.MATCH_UNINSTALLED: + already_installed = [ + name + for name in image.get_catalog( + image.IMG_CATALOG_INSTALLED + ).names() + if name in matchdict + ] + if ( + illegals + or (raise_unmatched and nonmatch) + or multimatch + or (not_installed and raise_not_installed) + or multispec + or already_installed + ): + raise api_errors.PlanCreationException( + already_installed=already_installed, + illegal=illegals, + missing_matches=not_installed, + multiple_matches=multimatch, + multispec=multispec, + unmatched_fmris=nonmatch, + ) + + if return_matchdict: + return matchdict + return set(matchdict.keys()) + + @staticmethod + def __match_user_fmris( + image, + patterns, + match_type, + pub_ranks=misc.EmptyDict, + installed_pkgs=misc.EmptyDict, + raise_not_installed=True, + reject_set=misc.EmptyI, + default_matcher=None, + ): + """Given a user-specified list of patterns, return a dictionary + of matching fmris: + + {pkgname: [fmri1, fmri2, ...] + pkgname: [fmri1, fmri2, ...], + ... + } + + Constraint used is always AUTO as per expected UI behavior. + + 'match_type' indicates how matching should be restricted. The + possible values are: + + MATCH_ALL + Matching is performed using all known package stems + and versions. In this case, 'installed_pkgs' must also + be provided. + + MATCH_INST_VERSIONS + Matching is performed using only installed package + stems and versions. + + MATCH_INST_STEMS + Matching is performed using all known package versions + for stems matching installed packages. In this case, + 'installed_pkgs' must also be provided. + + + Note that patterns starting w/ pkg:/ require an exact match; + patterns containing '*' will using fnmatch rules; the default + trailing match rules are used for remaining patterns unless + 'default_matcher' is specified. + + 'default_matcher' is an optional pkg.fmri.match_* method to + determine which matching rules should be applied to patterns + that do not use wildcards or start with 'pkg:/' or '/'. + + Exactly duplicated patterns are ignored. + + Routine raises PlanCreationException if errors occur: it is + illegal to specify multiple different patterns that match the + same pkg name unless exactly one of those patterns contained no + wildcards. Only patterns that contain wildcards are allowed to + match multiple packages. 
+ + FMRI lists are trimmed by publisher, either by pattern + specification, installed version or publisher ranking (in that + order) when match_type is not MATCH_INST_VERSIONS. + + 'raise_not_installed' determines whether an exception will be + raised if any pattern matches a package that's not installed. + + 'reject_set' is a set() containing the stems of packages that + should be excluded from matches. + """ + + # problems we check for + illegals = [] + nonmatch = [] + multimatch = [] + not_installed = [] + multispec = [] + exclpats = [] + wrongpub = [] + wrongvar = set() + + matchers = [] + fmris = [] + pubs = [] + versions = [] + + wildcard_patterns = set() + + renamed_fmris = defaultdict(set) + obsolete_fmris = [] + + # ignore dups + patterns = list(set(patterns)) + + installed_pubs = misc.EmptyDict + if match_type in [ImagePlan.MATCH_INST_STEMS, ImagePlan.MATCH_ALL]: + # build installed publisher dictionary + installed_pubs = dict( + ( + (f.pkg_name, f.get_publisher()) + for f in installed_pkgs.values() + ) + ) + + # figure out which kind of matching rules to employ + latest_pats = set() + seen = set() + npatterns = [] + for pat in patterns: + try: + parts = pat.split("@", 1) + pat_stem = parts[0] + pat_ver = None + if len(parts) > 1: + pat_ver = parts[1] + + if "*" in pat_stem or "?" in pat_stem: + matcher = pkg.fmri.glob_match + wildcard_patterns.add(pat) + elif pat_stem.startswith("pkg:/") or pat_stem.startswith("/"): + matcher = pkg.fmri.exact_name_match + elif default_matcher: + matcher = default_matcher + else: + matcher = pkg.fmri.fmri_match - # This maps destination actions to the pkgplans they're - # associated with, which allows us to create the newly - # discovered update _ActionPlans. - dest_pkgplans = {} + if matcher == pkg.fmri.glob_match: + fmri = pkg.fmri.MatchingPkgFmri(pat_stem) + else: + fmri = pkg.fmri.PkgFmri(pat_stem) - new_updates = [] - for i, ap in enumerate(self.pd.install_actions): - if ap is None: - continue - pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) - - if "path" in ap.dst.attrs and \ - self.__check_excluded(ap.dst.attrs["path"]): - if DebugValues["exclude"]: - print("!Install", ap.dst.attrs["path"]) - self.pd.elided_actions.append(ap) - pp_needs_trimming.add(ap.p) - ap.p.actions.added[plan_pos[id(ap.dst)]] = None - self.pd.install_actions[i] = None - - # In order to handle editable files that move their path - # or change pkgs, for all new files with original_name - # attribute, make sure file isn't being removed by - # checking removal list. If it is, tag removal to save - # file, and install to recover cached version... caching - # is needed if directories are removed or don't exist - # yet. - if (ap.dst.name == "file" and - "original_name" in ap.dst.attrs and - ap.dst.attrs["original_name"] in cons_named): - cache_name = ap.dst.attrs["original_name"] - index = cons_named[cache_name].idx - ra = self.pd.removal_actions[index].src - assert(id(ra) == cons_named[cache_name].id) - # If the paths match, don't remove and add; - # convert to update. - if ap.dst.attrs["path"] == ra.attrs["path"]: - new_updates.append((ra, ap.dst)) - # If we delete items here, the indices - # in cons_named will be bogus, so mark - # them for later deletion. - self.pd.removal_actions[index] = None - self.pd.install_actions[i] = None - # No need to handle it in cons_generic - # anymore - del cons_generic[("file", ra.attrs["path"])] - dest_pkgplans[id(ap.dst)] = ap.p - else: - # The 'true' indicates the file should - # be removed from source. 
The removal - # action is changed using setdefault so - # that any overlay rules applied during - # conflict checking remain intact. - ra.attrs.setdefault("save_file", - [cache_name, "true"]) - ap.dst.attrs["save_file"] = [cache_name, - "true"] - - cache_name = index = ra = None - - # Similarly, try to prevent files (and other actions) - # from unnecessarily being deleted and re-created if - # they're simply moving between packages, but only if - # they keep their paths (or key-attribute values). - keyval = hashify(ap.dst.attrs.get(ap.dst.key_attr, None)) - if (ap.dst.name, keyval) in cons_generic: - nkv = ap.dst.name, keyval - index = cons_generic[nkv].idx - ra = self.pd.removal_actions[index].src - assert(id(ra) == cons_generic[nkv].id) - if keyval == ra.attrs[ra.key_attr]: - new_updates.append((ra, ap.dst)) - self.pd.removal_actions[index] = None - self.pd.install_actions[i] = None - dest_pkgplans[id(ap.dst)] = ap.p - # Add the action to the pkgplan's update - # list and mark it for removal from the - # install list. - ap.p.actions.changed.append((ra, ap.dst)) - ap.p.actions.added[plan_pos[id(ap.dst)]] = None - pp_needs_trimming.add(ap.p) - nkv = index = ra = None - - self.pd._actuators.scan_install(ap) - if self.pd._need_boot_archive is None: - if ap.dst.attrs.get("path", "").startswith( - ramdisk_prefixes): - self.pd._need_boot_archive = True - - del ConsolidationEntry, cons_generic, cons_named, plan_pos - - # Remove from the pkgplans the install actions which have been - # consolidated away. - for p in pp_needs_trimming: - # Can't modify the p.actions tuple, so modify the added - # member in-place. - p.actions.added[:] = [ - a - for a in p.actions.added - if a is not None - ] - del pp_needs_trimming - - pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) - - # We want to cull out actions where they've not changed at all, - # leaving only the changed ones to put into - # self.pd.update_actions. - nu_src = manifest.Manifest() - nu_src.set_content(content=(a[0] for a in new_updates), - excludes=self.__old_excludes) - nu_dst = manifest.Manifest() - pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) - nu_dst.set_content(content=(a[1] for a in new_updates), - excludes=self.__new_excludes) - del new_updates - pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) - nu_add, nu_chg, nu_rem = nu_dst.difference(nu_src, - self.__old_excludes, self.__new_excludes) - pt.plan_add_progress(pt.PLAN_ACTION_CONSOLIDATE) - # All the differences should be updates - assert not nu_add - assert not nu_rem - del nu_src, nu_dst - - # Extend update_actions with the new tuples. The package plan - # is the one associated with the action getting installed. - self.pd.update_actions.extend([ - _ActionPlan(dest_pkgplans[id(dst)], src, dst) - for src, dst in nu_chg - ]) - - del dest_pkgplans, nu_chg - - # Cull any update actions that are excluded by the exclusion - # patterns configured in the image. - for i, ap in enumerate(self.pd.update_actions): - if ap is None: + if not pat_ver: + # Do nothing. + pass + elif "*" in pat_ver or "?" in pat_ver or pat_ver == "latest": + fmri.version = pkg.version.MatchingVersion(pat_ver) + else: + fmri.version = pkg.version.Version(pat_ver) + + sfmri = str(fmri) + if sfmri in seen: + # A different form of the same pattern + # was specified already; ignore this + # one (e.g. pkg:/network/ping, + # /network/ping). 
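# Illustrative sketch, not from the original source: how the version half of a
# user pattern is classified by the code above.  The real objects are
# pkg.version.MatchingVersion and pkg.version.Version; this stand-in only
# returns a tag so it can run without the pkg modules.
def classify_version_part(pattern):
    stem, _, ver = pattern.partition("@")
    if not ver:
        return stem, None                 # no version constraint
    if "*" in ver or "?" in ver or ver == "latest":
        return stem, ("matching", ver)    # wildcard/'latest' -> MatchingVersion
    return stem, ("exact", ver)           # anything else -> exact Version

assert classify_version_part("web/server") == ("web/server", None)
assert classify_version_part("web/server@latest") == ("web/server", ("matching", "latest"))
assert classify_version_part("web/server@2.4.*") == ("web/server", ("matching", "2.4.*"))
assert classify_version_part("web/server@2.4.41") == ("web/server", ("exact", "2.4.41"))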
+ wildcard_patterns.discard(pat) + continue + + seen.add(sfmri) + npatterns.append(pat) + if pat_ver and getattr(fmri.version, "match_latest", None): + latest_pats.add(pat) + + matchers.append(matcher) + pubs.append(fmri.publisher) + versions.append(fmri.version) + fmris.append(fmri) + + except (pkg.fmri.FmriError, pkg.version.VersionError) as e: + # illegals should be a list of fmri patterns so that + # PackageMatchErrors can construct correct error message. + illegals.append(pat) + patterns = npatterns + del npatterns, seen + + # Create a dictionary of patterns, with each value being a + # dictionary of pkg names & fmris that match that pattern. + ret = dict(zip(patterns, [dict() for i in patterns])) + + # Track patterns rejected due to user request (--reject). + rejected_pats = set() + + # Track patterns rejected due to variants. + rejected_vars = set() + + # keep track of publishers we reject due to implict selection + # of installed publisher to produce better error message. + rejected_pubs = {} + + if match_type != ImagePlan.MATCH_INST_VERSIONS: + cat = image.get_catalog(image.IMG_CATALOG_KNOWN) + info_needed = [pkg.catalog.Catalog.DEPENDENCY] + else: + cat = image.get_catalog(image.IMG_CATALOG_INSTALLED) + info_needed = [] + + variants = image.get_variants() + for name in cat.names(): + for pat, matcher, fmri, version, pub in zip( + patterns, matchers, fmris, versions, pubs + ): + if not matcher(name, fmri.pkg_name): + continue # name doesn't match + for ver, entries in cat.entries_by_version( + name, info_needed=info_needed + ): + if version and not ver.is_successor( + version, pkg.version.CONSTRAINT_AUTO + ): + continue # version doesn't match + for f, metadata in entries: + fpub = f.publisher + if pub and pub != fpub: + continue # specified pubs conflict + elif ( + match_type == ImagePlan.MATCH_INST_STEMS + and f.pkg_name not in installed_pkgs + ): + # Matched stem is not + # in list of installed + # stems. + continue + elif f.pkg_name in reject_set: + # Pattern is excluded. + rejected_pats.add(pat) + continue + + states = metadata["metadata"]["states"] + ren_deps = [] + omit_package = False + # Check for renamed packages and + # that the package matches the + # image's variants. + for astr in metadata.get("actions", misc.EmptyI): + try: + a = pkg.actions.fromstr(astr) + except pkg.actions.ActionError: + # Unsupported or + # invalid package; + # drive on and + # filter as much as + # possible. The + # solver will reject + # this package later. continue - path = None - if (ap.src and "path" in ap.src.attrs and - self.__check_excluded( - ap.src.attrs["path"])): - path = ap.src.attrs["path"] - elif (ap.dst and "path" in ap.dst.attrs and - self.__check_excluded( - ap.dst.attrs["path"])): - path = ap.dst.attrs["path"] - - if path: - if DebugValues["exclude"]: - print("!Update", path) - self.pd.elided_actions.append(ap) - self.pd.update_actions[i] = None - - pt.plan_done(pt.PLAN_ACTION_CONSOLIDATE) - pt.plan_start(pt.PLAN_ACTION_MEDIATION) - pt.plan_add_progress(pt.PLAN_ACTION_MEDIATION) - - # Mediate and repair links affected by the plan. - prop_mediators = self.__mediate_links(mediated_removed_paths) - - pt.plan_add_progress(pt.PLAN_ACTION_MEDIATION) - for prop in ("removal_actions", "install_actions", - "update_actions"): - pval = getattr(self.pd, prop) - pval[:] = [ - a - for a in pval - if a is not None - ] - - pt.plan_add_progress(pt.PLAN_ACTION_MEDIATION) - - # Add any necessary repairs to plan. 
- self.__evaluate_fixups() - - pt.plan_add_progress(pt.PLAN_ACTION_MEDIATION) - - # Finalize link mediation. - self.__finalize_mediation(prop_mediators, mediated_del_path_target) - - pt.plan_done(pt.PLAN_ACTION_MEDIATION) - if DebugValues["actions"]: - print("--- Final actions ---") - for prop in ("removal_actions", "install_actions", - "update_actions"): - key = prop.split("_")[0] - for a in getattr(self.pd, prop): - print("{0} {1}".format(key, str(a))) - - pt.plan_start(pt.PLAN_ACTION_FINALIZE) - - # Go over update actions - l_refresh = [] - l_actions = {} - if self.pd.update_actions: - # iterating over actions is slow, so don't do it - # unless we have to. - l_actions = self.get_actions("hardlink", - self.hardlink_keyfunc) - for a in self.pd.update_actions: - # For any files being updated that are the target of - # _any_ hardlink actions, append the hardlink actions - # to the update list so that they are not broken. - # Since we reference count hardlinks, update each one - # only once. - if a[2].name == "file": - path = a[2].attrs["path"] - if path in l_actions: - unique_links = dict((l.attrs["path"], l) - for l in l_actions[path]) - l_refresh.extend([ - _ActionPlan(a[0], l, l) - for l in unique_links.values() - ]) - path = None - - # scan both old and new actions - # repairs may result in update action w/o orig action - self.pd._actuators.scan_update(a) - if self.pd._need_boot_archive is None: - if a[2].attrs.get("path", "").startswith( - ramdisk_prefixes): - self.pd._need_boot_archive = True - - self.pd.update_actions.extend(l_refresh) - - # sort actions to match needed processing order - remsort = operator.itemgetter(1) - addsort = operator.itemgetter(2) - self.pd.removal_actions.sort(key=remsort, reverse=True) - self.pd.update_actions.sort(key=addsort) - self.pd.install_actions.sort(key=addsort) - - # find the first and last hardlink in the install_actions - fhl = lhl = -1 - for i, ap in enumerate(self.pd.install_actions): - if ap.dst.name == "hardlink": - if fhl == -1: - fhl = i - lhl = i - elif fhl != -1: - break - - # now reorder the hardlinks to respect inter-dependencies - if fhl != -1: - hardlinks = self.pd.install_actions[fhl:lhl + 1] - hardlinks = _reorder_hardlinks(hardlinks) - self.pd.install_actions[fhl:lhl + 1] = hardlinks - - # cleanup pkg_plan objects which don't actually contain any - # changes and add any new ones to list of changes - for p in list(self.pd.pkg_plans): - if p.origin_fmri != p.destination_fmri or \ - p.actions.removed or p.actions.changed or \ - p.actions.added: - pair = (p.origin_fmri, p.destination_fmri) - if pair not in self.pd._fmri_changes: - self.pd._fmri_changes.append(pair) + if ( + pkgdefs.PKG_STATE_RENAMED in states + and a.name == "depend" + and a.attrs["type"] == "require" + ): + ren_deps.append( + pkg.fmri.PkgFmri(a.attrs["fmri"]) + ) + continue + elif a.name != "set": continue - self.pd.pkg_plans.remove(p) - fmri = p.origin_fmri - if (fmri, fmri) in self.pd._fmri_changes: - self.pd._fmri_changes.remove( - (fmri, fmri)) - del p - - # - # Sort the package plans by fmri to create predictability (and - # some sense of order) in the download output; this is not - # a perfect sort of this, but we only really care for things - # we fetch over the wire. 
- # - def key_func(a): - if a.destination_fmri: - return a.destination_fmri - return "" - - self.pd.pkg_plans.sort(key=key_func) - - pt.plan_done(pt.PLAN_ACTION_FINALIZE) - - if self.pd._need_boot_archive is None: - self.pd._need_boot_archive = False - - self.pd.state = plandesc.EVALUATED_OK - - def nothingtodo(self): - """Test whether this image plan contains any work to do """ - - if self.pd.state in [plandesc.EVALUATED_PKGS, - plandesc.MERGED_OK]: - return not (self.pd._fmri_changes or - self.pd._new_variants or - (self.pd._new_facets is not None) or - self.pd._mediators_change or - self.pd.pkg_plans) - elif self.pd.state >= plandesc.EVALUATED_OK: - return not (self.pd.pkg_plans or - self.pd._new_variants or - (self.pd._new_facets is not None) or - self.pd._mediators_change) - assert 0, "Shouldn't call nothingtodo() for state = {0:d}".format( - self.pd.state) - - def preexecute(self): - """Invoke the evaluated image plan - preexecute, execute and postexecute - execute actions need to be sorted across packages - """ - - assert self.pd.state == plandesc.EVALUATED_OK - - if self.pd._image_lm != \ - self.image.get_last_modified(string=True): - # State has been modified since plan was created; this - # plan is no longer valid. - self.pd.state = plandesc.PREEXECUTED_ERROR - raise api_errors.InvalidPlanError() - - if self.nothingtodo(): - self.pd.state = plandesc.PREEXECUTED_OK - return - - if self.image.version != self.image.CURRENT_VERSION: - # Prevent plan execution if image format isn't current. - raise api_errors.ImageFormatUpdateNeeded( - self.image.root) - - if DebugValues["plandesc_validate"]: - # get a json copy of the plan description so that - # later we can verify that it wasn't updated during - # the pre-execution stage. - pd_json1 = self.pd.getstate(self.pd, - reset_volatiles=True) - - # Checks the index to make sure it exists and is - # consistent. If it's inconsistent an exception is thrown. - # If it's totally absent, it will index the existing packages - # so that the incremental update that follows at the end of - # the function will work correctly. It also repairs the index - # for this BE so the user can boot into this BE and have a - # correct index. - if self.update_index: - ind = None - try: - self.image.update_index_dir() - ind = indexer.Indexer(self.image, - self.image.get_manifest, - self.image.get_manifest_path, - progtrack=self.__progtrack, - excludes=self.__old_excludes) - if ind.check_index_existence(): - try: - ind.check_index_has_exactly_fmris( - self.image.gen_installed_pkg_names()) - except se.IncorrectIndexFileHash as e: - self.__preexecuted_indexing_error = \ - api_errors.WrapSuccessfulIndexingException( - e, - traceback.format_exc(), - traceback.format_stack() - ) - ind.rebuild_index_from_scratch( - self.image.\ - gen_installed_pkgs() - ) - except se.IndexingException as e: - # If there's a problem indexing, we want to - # attempt to finish the installation anyway. If - # there's a problem updating the index on the - # new image, that error needs to be - # communicated to the user. - self.__preexecuted_indexing_error = \ - api_errors.WrapSuccessfulIndexingException( - e, traceback.format_exc(), - traceback.format_stack()) - - # No longer needed. - del ind - - # check if we're going to have enough room - # stat fs again just in case someone else is using space... 
- self.__update_avail_space() - if self.pd._cbytes_added > self.pd._cbytes_avail: - raise api_errors.ImageInsufficentSpace( - self.pd._cbytes_added, - self.pd._cbytes_avail, - _("Download cache")) - if self.pd._bytes_added > self.pd._bytes_avail: - raise api_errors.ImageInsufficentSpace( - self.pd._bytes_added, - self.pd._bytes_avail, - _("Root filesystem")) - - # Remove history about manifest/catalog transactions. This - # helps the stats engine by only considering the performance of - # bulk downloads. - self.image.transport.stats.reset() - - # - # Calculate size of data retrieval and pass it to progress - # tracker. - # - npkgs = nfiles = nbytes = 0 - for p in self.pd.pkg_plans: - nf, nb = p.get_xferstats() - nbytes += nb - nfiles += nf - - # It's not perfectly accurate but we count a download - # even if the package will do zero data transfer. This - # makes the pkg stats consistent between download and - # install. - npkgs += 1 - self.__progtrack.download_set_goal(npkgs, nfiles, nbytes) - - lic_errors = [] - try: - # Check for license acceptance issues first to avoid - # wasted time in the download phase and so failure - # can occur early. - for p in self.pd.pkg_plans: - try: - p.preexecute() - except api_errors.PkgLicenseErrors as e: - # Accumulate all license errors. - lic_errors.append(e) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - raise - - if lic_errors: - raise api_errors.PlanLicenseErrors(lic_errors) - - try: - for p in self.pd.pkg_plans: - p.download(self.__progtrack, - self.__check_cancel) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - raise - except (api_errors.InvalidDepotResponseException, - api_errors.TransportError) as e: - if p and p._autofix_pkgs: - e._autofix_pkgs = p._autofix_pkgs - raise - - self.image.transport.shutdown() - self.__progtrack.download_done() - except: - self.pd.state = plandesc.PREEXECUTED_ERROR - raise - self.pd.state = plandesc.PREEXECUTED_OK - - if DebugValues["plandesc_validate"]: - # verify that preexecution did not update the plan - pd_json2 = self.pd.getstate(self.pd, - reset_volatiles=True) - pkg.misc.json_diff("PlanDescription", - pd_json1, pd_json2, pd_json1, pd_json2) - del pd_json1, pd_json2 - - def execute(self): - """Invoke the evaluated image plan - preexecute, execute and postexecute - execute actions need to be sorted across packages - """ - assert self.pd.state == plandesc.PREEXECUTED_OK - - if self.pd._image_lm != \ - self.image.get_last_modified(string=True): - # State has been modified since plan was created; this - # plan is no longer valid. 
- self.pd.state = plandesc.EXECUTED_ERROR - raise api_errors.InvalidPlanError() - - # load data from previously downloaded actions - try: - for p in self.pd.pkg_plans: - p.cacheload() - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - raise + atname = a.attrs["name"] + if not atname.startswith("variant."): + continue - # check for available space - self.__update_avail_space() - if (self.pd._bytes_added - self.pd._cbytes_added) > self.pd._bytes_avail: - raise api_errors.ImageInsufficentSpace( - self.pd._bytes_added - self.pd._cbytes_added, - self.pd._bytes_avail, - _("Root filesystem")) - - # - # what determines execution order? - # - # The following constraints are key in understanding imageplan - # execution: - # - # 1) All non-directory actions (files, users, hardlinks, - # symbolic links, etc.) must appear in only a single installed - # package. - # - # 2) All installed packages must be consistent in their view of - # action types; if /usr/openwin is a directory in one package, - # it must be a directory in all packages, never a symbolic link; - # this includes implicitly defined directories. - # - # A key goal in IPS is to be able to undergo an arbitrary - # transformation in package contents in a single step. Packages - # must be able to exchange files, convert directories to - # symbolic links, etc.; so long as the start and end states meet - # the above two constraints IPS must be able to transition - # between the states directly. This leads to the following: - # - # 1) All actions must be ordered across packages; packages - # cannot be updated one at a time. - # - # This is readily apparent when one considers two packages - # exchanging files in their new versions; in each case the - # package now owning the file must be installed last, but it - # is not possible for each package to be installed before the - # other. Clearly, all the removals must be done first, - # followed by the installs and updates. - # - # 2) Installs of new actions must precede updates of existing - # ones. - # - # In order to accommodate changes of file ownership of - # existing files to a newly created user, it is necessary - # for the installation of that user to precede the update of - # files to reflect their new ownership. - # - # The exception to this rule is driver actions. Aliases of - # existing drivers which are going to be removed must be - # removed before any new drivers are installed or updated. - # This prevents an error if an alias is moving from one - # driver to another. - - if self.nothingtodo(): - self.pd.state = plandesc.EXECUTED_OK - return - - pt = self.__progtrack - pt.set_major_phase(pt.PHASE_EXECUTE) - - # It's necessary to do this check here because the state of the - # image before the current operation is performed is desired. - empty_image = self.__is_image_empty() - - if not empty_image: - # Before proceeding, remove fast lookups database so - # that if _create_fast_lookups is interrupted later the - # client isn't left with invalid state. - self.image._remove_fast_lookups() - - if not self.image.is_liveroot(): - # Check if the child is a running zone. If so run the - # actuator in the zone. - - # Linked Image code uses trailing slashes, Image code - # does not. So we make sure that our path comparisons - # are always on tha same page. 
- root = os.path.normpath(self.image.root) - - rzones = zone.list_running_zones() - for z, path in six.iteritems(rzones): - if os.path.normpath(path) == root: - self.pd._actuators.set_zone(z) - # there should be only on zone per path - break - - self.pd._actuators.exec_prep(self.image) - - self.pd._actuators.exec_pre_actuators(self.image) - - # List of tuples of (src, dest) used to track each pkgplan so - # that it can be discarded after execution. - executed_pp = [] - try: - try: - pt.actions_set_goal(pt.ACTION_REMOVE, - len(self.pd.removal_actions)) - pt.actions_set_goal(pt.ACTION_INSTALL, - len(self.pd.install_actions)) - pt.actions_set_goal(pt.ACTION_UPDATE, - len(self.pd.update_actions)) - - # execute removals - for p, src, dest in self.pd.removal_actions: - p.execute_removal(src, dest) - pt.actions_add_progress( - pt.ACTION_REMOVE) - pt.actions_done(pt.ACTION_REMOVE) - - # Update driver alias database to reflect the - # aliases drivers have lost in the new image. - # This prevents two drivers from ever attempting - # to have the same alias at the same time. - for name, aliases in \ - six.iteritems(self.pd._rm_aliases): - driver.DriverAction.remove_aliases(name, - aliases, self.image) - - # Done with removals; discard them so memory can - # be re-used. - self.pd.removal_actions = [] - - # execute installs; if action throws a retry - # exception try it again afterwards. - retries = [] - for p, src, dest in self.pd.install_actions: - try: - p.execute_install(src, dest) - pt.actions_add_progress( - pt.ACTION_INSTALL) - except pkg.actions.ActionRetry: - retries.append((p, src, dest)) - for p, src, dest in retries: - p.execute_retry(src, dest) - pt.actions_add_progress( - pt.ACTION_INSTALL) - retries = [] - pt.actions_done(pt.ACTION_INSTALL) - - # Done with installs, so discard them so memory - # can be re-used. - self.pd.install_actions = [] - - # execute updates; in some cases there may be - # a retryable exception, so capture those and - # retry after running through all the - # actions(which might address the reason for - # the retryable exception). - # An example is a user action that depends - # upon a file existing (ie ftpusers). - retries = [] - for p, src, dest in self.pd.update_actions: - try: - p.execute_update(src, dest) - pt.actions_add_progress( - pt.ACTION_UPDATE) - except pkg.actions.ActionRetry: - retries.append((p, src, dest)) - - for p, src, dest in retries: - p.execute_retry(src, dest) - pt.actions_add_progress(pt.ACTION_UPDATE) - retries = [] - - pt.actions_done(pt.ACTION_UPDATE) - pt.actions_all_done() - pt.set_major_phase(pt.PHASE_FINALIZE) - - # Done with updates, so discard them so memory - # can be re-used. - self.pd.update_actions = [] - - # handle any postexecute operations - while self.pd.pkg_plans: - # postexecute in reverse, but pkg_plans - # aren't ordered, so does it matter? - # This allows the pkgplan objects to be - # discarded as they're executed which - # allows memory to be-reused sooner. 
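# Illustrative sketch, not from the original source: the removal -> install ->
# update ordering and the ActionRetry handling used above, reduced to plain
# Python.  'plan' and its pkgplan objects stand in for the real
# pkg.client plandesc/pkgplan classes.
class ActionRetry(Exception):
    pass

def run_with_retry(triples, first, second):
    # Run 'first' over every (pkgplan, src, dest) action; any that raise
    # ActionRetry are re-run with 'second' once the rest of the phase is done
    # (e.g. a user action that depends on a file delivered later in the phase).
    retries = []
    for p, src, dest in triples:
        try:
            first(p, src, dest)
        except ActionRetry:
            retries.append((p, src, dest))
    for p, src, dest in retries:
        second(p, src, dest)

def execute_plan(plan):
    # All removals across every package come first, then installs, then
    # updates, so content can move between packages in a single operation.
    for p, src, dest in plan.removal_actions:
        p.execute_removal(src, dest)
    run_with_retry(plan.install_actions,
                   lambda p, s, d: p.execute_install(s, d),
                   lambda p, s, d: p.execute_retry(s, d))
    run_with_retry(plan.update_actions,
                   lambda p, s, d: p.execute_update(s, d),
                   lambda p, s, d: p.execute_retry(s, d))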
- p = self.pd.pkg_plans.pop() - p.postexecute() - executed_pp.append((p.destination_fmri, - p.origin_fmri)) - p = None - - # save package state - self.image.update_pkg_installed_state( - executed_pp, self.__progtrack, - self.__match_inst.keys()) - # no longer needed - self.__match_inst = {} - - # write out variant changes to the image config - if self.pd._varcets_change or \ - self.pd._mediators_change: - self.image.image_config_update( - self.pd._new_variants, - self.pd._new_facets, - self.pd._new_mediators) - # write out any changes - self.image._avoid_set_save( - *self.pd._new_avoid_obs) - # An essential step to set the property - # "dehydrated" if dehydrate/rehydrate succeeds. - if self.pd._op in (PKG_OP_DEHYDRATE, - PKG_OP_REHYDRATE): - self.image.cfg.set_property("property", - "dehydrated", self.operations_pubs) - self.image.save_config() - else: - # Mark image as modified if not calling - # save_config (which will do it for us). - self.image.update_last_modified() - - except EnvironmentError as e: - if e.errno == errno.EACCES or \ - e.errno == errno.EPERM: - raise api_errors.PermissionsException( - e.filename) - elif e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - elif e.errno == errno.ELOOP: - act = pkg.actions.unknown.UnknownAction() - raise api_errors.ActionExecutionError( - act, _("A link targeting itself or " - "part of a link loop was found at " - "'{0}'; a file or directory was " - "expected. Please remove the link " - "and try again.").format( - e.filename)) - raise - except pkg.actions.ActionError: - exc_type, exc_value, exc_tb = sys.exc_info() - self.pd.state = plandesc.EXECUTED_ERROR - try: - self.pd._actuators.exec_fail_actuators( - self.image) - except: - # Ensure the real cause of failure is raised. - pass - if six.PY2: - six.reraise(api_errors.InvalidPackageErrors([ - exc_value]), None, exc_tb) - else: - # six.reraise requires the first argument - # callable if the second argument is None. - # Also the traceback is automatically attached, - # in Python 3, so we can simply raise it. - raise api_errors.InvalidPackageErrors([ - exc_value]) - except: - exc_type, exc_value, exc_tb = sys.exc_info() - self.pd.state = plandesc.EXECUTED_ERROR - try: - self.pd._actuators.exec_fail_actuators( - self.image) - finally: - # This ensures that the original exception and - # traceback are used if exec_fail_actuators - # fails. - if six.PY2: - six.reraise(exc_value, None, exc_tb) - else: - raise exc_value + # For all variants set + # in the image, elide + # packages that are not + # for a matching variant + # value. + atvalue = a.attrs["value"] + is_list = type(atvalue) == list + for vn, vv in six.iteritems(variants): + if vn == atname and ( + (is_list and vv not in atvalue) + or (not is_list and vv != atvalue) + ): + omit_package = True + break + + if omit_package: + # Package skipped due to + # variant. + rejected_vars.add(pat) + continue + + ret[pat].setdefault(f.pkg_name, []).append(f) + + if ( + not pub + and match_type != ImagePlan.MATCH_INST_VERSIONS + and name in installed_pubs + and pub_ranks[installed_pubs[name]][1] == True + and installed_pubs[name] != fpub + ): + # Fmri publisher + # filtering is handled + # later. 
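# Illustrative sketch, not from the original source: the variant filter applied
# above, with the per-action loop collapsed into a dict lookup.  A package is
# omitted when one of its variant.* "set" actions does not include the image's
# value for that variant.  The variant names and values are examples.
def omit_for_variants(image_variants, set_actions):
    # set_actions: {"variant.arch": "i386"} or {"variant.arch": ["i386", "sparc"]}
    for vname, vvalue in image_variants.items():
        if vname not in set_actions:
            continue
        allowed = set_actions[vname]
        if not isinstance(allowed, list):
            allowed = [allowed]
        if vvalue not in allowed:
            return True
    return False

print(omit_for_variants({"variant.arch": "i386"},
                        {"variant.arch": ["i386", "sparc"]}))   # False - keep
print(omit_for_variants({"variant.arch": "i386"},
                        {"variant.arch": "sparc"}))             # True - omit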
+ rejected_pubs.setdefault(pat, set()).add(fpub) + + states = metadata["metadata"]["states"] + if pkgdefs.PKG_STATE_OBSOLETE in states: + obsolete_fmris.append(f) + if pkgdefs.PKG_STATE_RENAMED in states: + renamed_fmris[f] = ren_deps + + # remove multiple matches if all versions are obsolete + for p in patterns: + if len(ret[p]) > 1 and p not in wildcard_patterns: + # create dictionary of obsolete status vs + # pkg_name + obsolete = dict( + [ + ( + pkg_name, + reduce( + operator.or_, + [f in obsolete_fmris for f in ret[p][pkg_name]], + ), + ) + for pkg_name in ret[p] + ] + ) + # remove all obsolete match if non-obsolete + # match also exists + if set([True, False]) == set(obsolete.values()): + for pkg_name in obsolete: + if obsolete[pkg_name]: + del ret[p][pkg_name] + + # remove newer multiple match if renamed version exists + for p in patterns: + if len(ret[p]) > 1 and p not in wildcard_patterns: + renamed_matches = [ + pfmri + for pkg_name in ret[p] + for pfmri in ret[p][pkg_name] + if pfmri in renamed_fmris + ] + targets = set( + [ + pf.pkg_name + for f in renamed_matches + for pf in renamed_fmris[f] + ] + ) + for pkg_name in list(ret[p].keys()): + if pkg_name in targets: + del ret[p][pkg_name] + + # Determine match failures. + # matchdict maps package stems to input patterns. + matchdict = {} + for p in patterns: + l = len(ret[p]) + if l == 0: # no matches at all + if p in rejected_vars: + wrongvar.add(p) + elif p in rejected_pats: + exclpats.append(p) else: - self.pd._actuators.exec_post_actuators(self.image) - - self.image._create_fast_lookups(progtrack=self.__progtrack) - self.__save_release_notes() - - # success - self.pd.state = plandesc.EXECUTED_OK - self.pd._executed_ok() - - # reduce memory consumption - self.saved_files = {} - self.valid_directories = set() - self.__cached_actions = {} - - # Clear out the primordial user and group caches. - self.image._users = set() - self.image._groups = set() - self.image._usersbyname = {} - self.image._groupsbyname = {} - - # Perform the incremental update to the search indexes - # for all changed packages - if self.update_index: - self.image.update_index_dir() - ind = indexer.Indexer(self.image, - self.image.get_manifest, - self.image.get_manifest_path, - progtrack=self.__progtrack, - excludes=self.__new_excludes) - try: - if empty_image: - ind.setup() - if empty_image or ind.check_index_existence(): - ind.client_update_index(([], - executed_pp), self.image) - except KeyboardInterrupt: - raise - except se.ProblematicPermissionsIndexException: - # ProblematicPermissionsIndexException - # is included here as there's little - # chance that trying again will fix this - # problem. - raise api_errors.WrapIndexingException(e, - traceback.format_exc(), - traceback.format_stack()) - except Exception as e: - # It's important to delete and rebuild - # from scratch rather than using the - # existing indexer because otherwise the - # state will become confused. - del ind - # XXX Once we have a framework for - # emitting a message to the user in this - # spot in the code, we should tell them - # something has gone wrong so that we - # continue to get feedback to allow - # us to debug the code. 
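# Illustrative sketch, not from the original source: the trimming rule above
# that drops a pattern's obsolete stems when the same pattern also matched a
# non-obsolete stem.  A stem is flagged if any of its matched FMRIs is marked
# obsolete, mirroring the reduce(operator.or_, ...) expression; plain strings
# stand in for FMRI objects and the names are examples.
def trim_obsolete(matches, obsolete_fmris):
    # matches: {pkg_name: [fmri, ...]} for a single non-wildcard pattern
    if len(matches) <= 1:
        return matches
    flagged = {
        name: any(f in obsolete_fmris for f in fmris)
        for name, fmris in matches.items()
    }
    # Only trim when both obsolete and non-obsolete stems matched.
    if set(flagged.values()) == {True, False}:
        matches = {n: fl for n, fl in matches.items() if not flagged[n]}
    return matches

old = {"shell/bash": ["bash@5.1"], "shell/bash-44": ["bash-44@4.4"]}
print(trim_obsolete(old, obsolete_fmris={"bash-44@4.4"}))
# -> {'shell/bash': ['bash@5.1']}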
- try: - ind = indexer.Indexer(self.image, - self.image.get_manifest, - self.image.get_manifest_path, - progtrack=self.__progtrack, - excludes=self.__new_excludes) - ind.rebuild_index_from_scratch( - self.image.gen_installed_pkgs()) - except Exception as e: - raise api_errors.WrapIndexingException( - e, traceback.format_exc(), - traceback.format_stack()) - raise \ - api_errors.WrapSuccessfulIndexingException( - e, traceback.format_exc(), - traceback.format_stack()) - if self.__preexecuted_indexing_error is not None: - raise self.__preexecuted_indexing_error - - # As the very last thing, check if there are any broken - # mediators. - if self.invalid_meds and not self.image.is_zone(): - medmsg = self.__make_med_msg() - raise api_errors.InvalidMediatorTarget(medmsg) - - - def __is_image_empty(self): - try: - next(self.image.gen_installed_pkg_names()) - return False - except StopIteration: - return True - - @staticmethod - def match_user_stems(image, patterns, match_type, raise_unmatched=True, - raise_not_installed=True, return_matchdict=False, universe=None): - """Given a user specified list of patterns, return a set - of matching package stems. Any versions specified are - ignored. - - 'match_type' indicates how matching should be restricted. The - possible values are: - - MATCH_ALL - Matching is performed using all known package stems. - - MATCH_INST_VERSIONS - Matching is performed using only installed package - stems. - - MATCH_UNINSTALLED - Matching is performed using uninstalled packages; - it is an error for a pattern to match an installed - package. - - Note that patterns starting w/ pkg:/ require an exact match; - patterns containing '*' will using fnmatch rules; the default - trailing match rules are used for remaining patterns. - - Exactly duplicated patterns are ignored. - - Routine raises PlanCreationException if errors occur: it is - illegal to specify multiple different patterns that match the - same pkg name. Only patterns that contain wildcards are allowed - to match multiple packages. - - 'raise_unmatched' determines whether an exception will be - raised if any patterns didn't match any packages. - - 'raise_not_installed' determines whether an exception will be - raised if any pattern matches a package that's not installed. - - 'return_matchdict' determines whether the dictionary containing - which patterns matched which stems or the list of stems is - returned. - - 'universe' contains a list of tuples of publishers and package - names against which the patterns should be matched. - """ - # avoid checking everywhere - if not patterns: - return set() - - illegals = [] - nonmatch = [] - multimatch = [] - not_installed = [] - multispec = [] - already_installed = [] - - matchers = [] - fmris = [] - pubs = [] - - wildcard_patterns = set() - - # ignore dups - patterns = list(set(patterns)) - - # figure out which kind of matching rules to employ - seen = set() - npatterns = [] - for pat in patterns: - try: - parts = pat.split("@", 1) - pat_stem = parts[0] - - if "*" in pat_stem or "?" in pat_stem: - matcher = pkg.fmri.glob_match - wildcard_patterns.add(pat) - elif pat_stem.startswith("pkg:/") or \ - pat_stem.startswith("/"): - matcher = pkg.fmri.exact_name_match - else: - matcher = pkg.fmri.fmri_match - - if matcher == pkg.fmri.glob_match: - fmri = pkg.fmri.MatchingPkgFmri( - pat_stem) - else: - fmri = pkg.fmri.PkgFmri(pat_stem) - - sfmri = str(fmri) - if sfmri in seen: - # A different form of the same pattern - # was specified already; ignore this - # one (e.g. 
pkg:/network/ping, - # /network/ping). - wildcard_patterns.discard(pat) - continue - - seen.add(sfmri) - npatterns.append(pat) - matchers.append(matcher) - pubs.append(fmri.publisher) - fmris.append(fmri) - except pkg.fmri.FmriError as e: - illegals.append(e) - patterns = npatterns - del npatterns, seen - - # Create a dictionary of patterns, with each value being a - # set of pkg names that match that pattern. - ret = dict(zip(patterns, [set() for i in patterns])) - - if universe is not None: - assert match_type == ImagePlan.MATCH_ALL - pkg_names = universe + nonmatch.append(p) + elif l > 1 and p not in wildcard_patterns: + # multiple matches + multimatch.append( + ( + p, + set( + [ + f.get_pkg_stem() + for n in ret[p] + for f in ret[p][n] + ] + ), + ) + ) + else: + # single match or wildcard + for k, pfmris in six.iteritems(ret[p]): + # for each matching package name + matchdict.setdefault(k, []).append((p, pfmris)) + + proposed_dict = {} + for name, lst in six.iteritems(matchdict): + nwc_ps = [ + (p, set(pfmris)) + for p, pfmris in lst + if p not in wildcard_patterns + ] + pub_named = False + # If there are any non-wildcarded patterns that match + # this package name, prefer the fmris they selected over + # any the wildcarded patterns selected. + if nwc_ps: + rel_ps = nwc_ps + # Remove the wildcarded patterns that match this + # package from the result dictionary. + for p, pfmris in lst: + if p not in wildcard_patterns: + if p.startswith("pkg://") or p.startswith("//"): + pub_named = True + break + else: + tmp_ps = [ + (p, set(pfmris)) + for p, pfmris in lst + if p in wildcard_patterns + ] + # If wildcarded package names then compare + # patterns to see if any specify a particular + # publisher. If they do, prefer the package + # from that publisher. 
+ rel_ps = [ + (p, set(pfmris)) + for p, pfmris in tmp_ps + if p.startswith("pkg://") or p.startswith("//") + ] + if rel_ps: + pub_named = True else: - if match_type != ImagePlan.MATCH_INST_VERSIONS: - cat = image.get_catalog(image.IMG_CATALOG_KNOWN) - else: - cat = image.get_catalog( - image.IMG_CATALOG_INSTALLED) - pkg_names = cat.pkg_names() - - # construct matches for each pattern - for pkg_pub, name in pkg_names: - for pat, matcher, fmri, pub in \ - zip(patterns, matchers, fmris, pubs): - if pub and pkg_pub != pub: - continue - if matcher(name, fmri.pkg_name): - ret[pat].add(name) - - matchdict = {} - for p in patterns: - l = len(ret[p]) - if l == 0: # no matches at all - nonmatch.append(p) - elif l > 1 and p not in wildcard_patterns: - # multiple matches - multimatch.append((p, list(ret[p]))) - else: - # single match or wildcard - for k in ret[p]: - # for each matching package name - matchdict.setdefault(k, []).append(p) - - for name in matchdict: - if len(matchdict[name]) > 1: - # different pats, same pkg - multispec.append(tuple([name] + - matchdict[name])) - - if match_type == ImagePlan.MATCH_INST_VERSIONS: - not_installed, nonmatch = nonmatch, not_installed - elif match_type == ImagePlan.MATCH_UNINSTALLED: - already_installed = [ - name - for name in image.get_catalog( - image.IMG_CATALOG_INSTALLED).names() - if name in matchdict - ] - if illegals or (raise_unmatched and nonmatch) or multimatch \ - or (not_installed and raise_not_installed) or multispec \ - or already_installed: - raise api_errors.PlanCreationException( - already_installed=already_installed, - illegal=illegals, - missing_matches=not_installed, - multiple_matches=multimatch, - multispec=multispec, - unmatched_fmris=nonmatch) - - if return_matchdict: - return matchdict - return set(matchdict.keys()) - - @staticmethod - def __match_user_fmris(image, patterns, match_type, - pub_ranks=misc.EmptyDict, installed_pkgs=misc.EmptyDict, - raise_not_installed=True, reject_set=misc.EmptyI, - default_matcher=None): - """Given a user-specified list of patterns, return a dictionary - of matching fmris: - - {pkgname: [fmri1, fmri2, ...] - pkgname: [fmri1, fmri2, ...], - ... - } - - Constraint used is always AUTO as per expected UI behavior. - - 'match_type' indicates how matching should be restricted. The - possible values are: - - MATCH_ALL - Matching is performed using all known package stems - and versions. In this case, 'installed_pkgs' must also - be provided. - - MATCH_INST_VERSIONS - Matching is performed using only installed package - stems and versions. - - MATCH_INST_STEMS - Matching is performed using all known package versions - for stems matching installed packages. In this case, - 'installed_pkgs' must also be provided. - - - Note that patterns starting w/ pkg:/ require an exact match; - patterns containing '*' will using fnmatch rules; the default - trailing match rules are used for remaining patterns unless - 'default_matcher' is specified. - - 'default_matcher' is an optional pkg.fmri.match_* method to - determine which matching rules should be applied to patterns - that do not use wildcards or start with 'pkg:/' or '/'. - - Exactly duplicated patterns are ignored. - - Routine raises PlanCreationException if errors occur: it is - illegal to specify multiple different patterns that match the - same pkg name unless exactly one of those patterns contained no - wildcards. Only patterns that contain wildcards are allowed to - match multiple packages. 
- - FMRI lists are trimmed by publisher, either by pattern - specification, installed version or publisher ranking (in that - order) when match_type is not MATCH_INST_VERSIONS. - - 'raise_not_installed' determines whether an exception will be - raised if any pattern matches a package that's not installed. - - 'reject_set' is a set() containing the stems of packages that - should be excluded from matches. - """ - - # problems we check for - illegals = [] - nonmatch = [] - multimatch = [] + rel_ps = tmp_ps + + # Find the intersection of versions which matched all + # the relevant patterns. + common_pfmris = rel_ps[0][1] + for p, vs in rel_ps[1:]: + common_pfmris &= vs + # If none of the patterns specified a particular + # publisher and the package in question is installed + # from a sticky publisher, then remove all pfmris which + # have a different publisher. + inst_pub = installed_pubs.get(name) + stripped_by_publisher = False + if ( + not pub_named + and common_pfmris + and match_type != ImagePlan.MATCH_INST_VERSIONS + and inst_pub + and pub_ranks[inst_pub][1] == True + ): + common_pfmris = set( + p for p in common_pfmris if p.publisher == inst_pub + ) + stripped_by_publisher = True + if common_pfmris: + # The solver depends on these being in sorted + # order. + proposed_dict[name] = sorted(common_pfmris) + elif stripped_by_publisher: + for p, vs in rel_ps: + wrongpub.append((p, rejected_pubs[p])) + else: + multispec.append(tuple([name] + [p for p, vs in rel_ps])) + + if match_type != ImagePlan.MATCH_ALL: + not_installed, nonmatch = nonmatch, not_installed + + if ( + illegals + or nonmatch + or multimatch + or (not_installed and raise_not_installed) + or multispec + or wrongpub + or wrongvar + or exclpats + ): + if not raise_not_installed: not_installed = [] - multispec = [] - exclpats = [] - wrongpub = [] - wrongvar = set() - - matchers = [] - fmris = [] - pubs = [] - versions = [] - - wildcard_patterns = set() - - renamed_fmris = defaultdict(set) - obsolete_fmris = [] - - # ignore dups - patterns = list(set(patterns)) - - installed_pubs = misc.EmptyDict - if match_type in [ImagePlan.MATCH_INST_STEMS, - ImagePlan.MATCH_ALL]: - # build installed publisher dictionary - installed_pubs = dict(( - (f.pkg_name, f.get_publisher()) - for f in installed_pkgs.values() - )) - - # figure out which kind of matching rules to employ - latest_pats = set() - seen = set() - npatterns = [] - for pat in patterns: - try: - parts = pat.split("@", 1) - pat_stem = parts[0] - pat_ver = None - if len(parts) > 1: - pat_ver = parts[1] - - if "*" in pat_stem or "?" in pat_stem: - matcher = pkg.fmri.glob_match - wildcard_patterns.add(pat) - elif pat_stem.startswith("pkg:/") or \ - pat_stem.startswith("/"): - matcher = pkg.fmri.exact_name_match - elif default_matcher: - matcher = default_matcher - else: - matcher = pkg.fmri.fmri_match - - if matcher == pkg.fmri.glob_match: - fmri = pkg.fmri.MatchingPkgFmri( - pat_stem) - else: - fmri = pkg.fmri.PkgFmri( - pat_stem) - - if not pat_ver: - # Do nothing. - pass - elif "*" in pat_ver or "?" in pat_ver or \ - pat_ver == "latest": - fmri.version = \ - pkg.version.MatchingVersion(pat_ver) - else: - fmri.version = \ - pkg.version.Version(pat_ver) - - sfmri = str(fmri) - if sfmri in seen: - # A different form of the same pattern - # was specified already; ignore this - # one (e.g. pkg:/network/ping, - # /network/ping). 
- wildcard_patterns.discard(pat) - continue - - seen.add(sfmri) - npatterns.append(pat) - if pat_ver and \ - getattr(fmri.version, "match_latest", None): - latest_pats.add(pat) - - matchers.append(matcher) - pubs.append(fmri.publisher) - versions.append(fmri.version) - fmris.append(fmri) - - except (pkg.fmri.FmriError, - pkg.version.VersionError) as e: - # illegals should be a list of fmri patterns so that - # PackageMatchErrors can construct correct error message. - illegals.append(pat) - patterns = npatterns - del npatterns, seen - - # Create a dictionary of patterns, with each value being a - # dictionary of pkg names & fmris that match that pattern. - ret = dict(zip(patterns, [dict() for i in patterns])) - - # Track patterns rejected due to user request (--reject). - rejected_pats = set() - - # Track patterns rejected due to variants. - rejected_vars = set() - - # keep track of publishers we reject due to implict selection - # of installed publisher to produce better error message. - rejected_pubs = {} - - if match_type != ImagePlan.MATCH_INST_VERSIONS: - cat = image.get_catalog(image.IMG_CATALOG_KNOWN) - info_needed = [pkg.catalog.Catalog.DEPENDENCY] + raise api_errors.PlanCreationException( + unmatched_fmris=nonmatch, + multiple_matches=multimatch, + illegal=illegals, + missing_matches=not_installed, + multispec=multispec, + wrong_publishers=wrongpub, + wrong_variants=wrongvar, + rejected_pats=exclpats, + ) + + # eliminate lower ranked publishers + if match_type != ImagePlan.MATCH_INST_VERSIONS: + # no point for installed pkgs.... + for pkg_name in proposed_dict: + pubs_found = set([f.publisher for f in proposed_dict[pkg_name]]) + # 1000 is hack for installed but unconfigured + # publishers + best_pub = sorted( + [(pub_ranks.get(p, (1000, True))[0], p) for p in pubs_found] + )[0][1] + + # Include any installed FMRIs that were allowed + # by all of the previous filtering, even if they + # aren't from the "best" available publisher, to + # account for the scenario where the installed + # version is the newest or best version for the + # plan solution. While doing so, also eliminate + # any exact duplicate FMRIs from publishers + # other than the installed publisher to prevent + # thrashing in the solver due to many equiv. + # solutions and unexpected changes in package + # publishers that weren't explicitly requested. + inst_f = installed_pkgs.get(f.pkg_name, None) + inst_v = None + if inst_f not in proposed_dict[pkg_name]: + # Should only apply if it's part of the + # proposed set. + inst_f = None else: - cat = image.get_catalog(image.IMG_CATALOG_INSTALLED) - info_needed = [] - - variants = image.get_variants() - for name in cat.names(): - for pat, matcher, fmri, version, pub in \ - zip(patterns, matchers, fmris, versions, pubs): - if not matcher(name, fmri.pkg_name): - continue # name doesn't match - for ver, entries in cat.entries_by_version(name, - info_needed=info_needed): - if version and not ver.is_successor(version, - pkg.version.CONSTRAINT_AUTO): - continue # version doesn't match - for f, metadata in entries: - fpub = f.publisher - if pub and pub != fpub: - continue # specified pubs conflict - elif match_type == ImagePlan.MATCH_INST_STEMS and \ - f.pkg_name not in installed_pkgs: - # Matched stem is not - # in list of installed - # stems. - continue - elif f.pkg_name in reject_set: - # Pattern is excluded. 
- rejected_pats.add(pat) - continue - - states = metadata["metadata"]["states"] - ren_deps = [] - omit_package = False - # Check for renamed packages and - # that the package matches the - # image's variants. - for astr in metadata.get("actions", - misc.EmptyI): - try: - a = pkg.actions.fromstr( - astr) - except pkg.actions.ActionError: - # Unsupported or - # invalid package; - # drive on and - # filter as much as - # possible. The - # solver will reject - # this package later. - continue - - if pkgdefs.PKG_STATE_RENAMED in states and \ - a.name == "depend" and \ - a.attrs["type"] == "require": - ren_deps.append(pkg.fmri.PkgFmri( - a.attrs["fmri"])) - continue - elif a.name != "set": - continue - - atname = a.attrs["name"] - if not atname.startswith("variant."): - continue - - # For all variants set - # in the image, elide - # packages that are not - # for a matching variant - # value. - atvalue = a.attrs["value"] - is_list = type(atvalue) == list - for vn, vv in six.iteritems(variants): - if vn == atname and \ - ((is_list and - vv not in atvalue) or \ - (not is_list and - vv != atvalue)): - omit_package = True - break - - if omit_package: - # Package skipped due to - # variant. - rejected_vars.add(pat) - continue - - ret[pat].setdefault(f.pkg_name, - []).append(f) - - if not pub and match_type != ImagePlan.MATCH_INST_VERSIONS and \ - name in installed_pubs and \ - pub_ranks[installed_pubs[name]][1] \ - == True and installed_pubs[name] != \ - fpub: - # Fmri publisher - # filtering is handled - # later. - rejected_pubs.setdefault(pat, - set()).add(fpub) - - states = metadata["metadata"]["states"] - if pkgdefs.PKG_STATE_OBSOLETE in states: - obsolete_fmris.append(f) - if pkgdefs.PKG_STATE_RENAMED in states: - renamed_fmris[f] = ren_deps - - # remove multiple matches if all versions are obsolete - for p in patterns: - if len(ret[p]) > 1 and p not in wildcard_patterns: - # create dictionary of obsolete status vs - # pkg_name - obsolete = dict([ - (pkg_name, reduce(operator.or_, - [f in obsolete_fmris for f in ret[p][pkg_name]])) - for pkg_name in ret[p] - ]) - # remove all obsolete match if non-obsolete - # match also exists - if set([True, False]) == set(obsolete.values()): - for pkg_name in obsolete: - if obsolete[pkg_name]: - del ret[p][pkg_name] - - # remove newer multiple match if renamed version exists - for p in patterns: - if len(ret[p]) > 1 and p not in wildcard_patterns: - renamed_matches = [ - pfmri - for pkg_name in ret[p] - for pfmri in ret[p][pkg_name] - if pfmri in renamed_fmris - ] - targets = set([ - pf.pkg_name - for f in renamed_matches - for pf in renamed_fmris[f] - ]) - - for pkg_name in list(ret[p].keys()): - if pkg_name in targets: - del ret[p][pkg_name] - - # Determine match failures. - # matchdict maps package stems to input patterns. 
- matchdict = {} - for p in patterns: - l = len(ret[p]) - if l == 0: # no matches at all - if p in rejected_vars: - wrongvar.add(p) - elif p in rejected_pats: - exclpats.append(p) - else: - nonmatch.append(p) - elif l > 1 and p not in wildcard_patterns: - # multiple matches - multimatch.append((p, set([ - f.get_pkg_stem() - for n in ret[p] - for f in ret[p][n] - ]))) - else: - # single match or wildcard - for k, pfmris in six.iteritems(ret[p]): - # for each matching package name - matchdict.setdefault(k, []).append( - (p, pfmris)) - - proposed_dict = {} - for name, lst in six.iteritems(matchdict): - nwc_ps = [ - (p, set(pfmris)) - for p, pfmris in lst - if p not in wildcard_patterns - ] - pub_named = False - # If there are any non-wildcarded patterns that match - # this package name, prefer the fmris they selected over - # any the wildcarded patterns selected. - if nwc_ps: - rel_ps = nwc_ps - # Remove the wildcarded patterns that match this - # package from the result dictionary. - for p, pfmris in lst: - if p not in wildcard_patterns: - if p.startswith("pkg://") or \ - p.startswith("//"): - pub_named = True - break - else: - tmp_ps = [ - (p, set(pfmris)) - for p, pfmris in lst - if p in wildcard_patterns - ] - # If wildcarded package names then compare - # patterns to see if any specify a particular - # publisher. If they do, prefer the package - # from that publisher. - rel_ps = [ - (p, set(pfmris)) - for p, pfmris in tmp_ps - if p.startswith("pkg://") or - p.startswith("//") - ] - if rel_ps: - pub_named = True - else: - rel_ps = tmp_ps - - # Find the intersection of versions which matched all - # the relevant patterns. - common_pfmris = rel_ps[0][1] - for p, vs in rel_ps[1:]: - common_pfmris &= vs - # If none of the patterns specified a particular - # publisher and the package in question is installed - # from a sticky publisher, then remove all pfmris which - # have a different publisher. - inst_pub = installed_pubs.get(name) - stripped_by_publisher = False - if not pub_named and common_pfmris and \ - match_type != ImagePlan.MATCH_INST_VERSIONS and \ - inst_pub and pub_ranks[inst_pub][1] == True: - common_pfmris = set( - p for p in common_pfmris - if p.publisher == inst_pub - ) - stripped_by_publisher = True - if common_pfmris: - # The solver depends on these being in sorted - # order. - proposed_dict[name] = sorted(common_pfmris) - elif stripped_by_publisher: - for p, vs in rel_ps: - wrongpub.append((p, rejected_pubs[p])) - else: - multispec.append(tuple([name] + - [p for p, vs in rel_ps])) - - if match_type != ImagePlan.MATCH_ALL: - not_installed, nonmatch = nonmatch, not_installed - - if illegals or nonmatch or multimatch or \ - (not_installed and raise_not_installed) or multispec or \ - wrongpub or wrongvar or exclpats: - if not raise_not_installed: - not_installed = [] - raise api_errors.PlanCreationException( - unmatched_fmris=nonmatch, - multiple_matches=multimatch, illegal=illegals, - missing_matches=not_installed, multispec=multispec, - wrong_publishers=wrongpub, wrong_variants=wrongvar, - rejected_pats=exclpats) - - # eliminate lower ranked publishers - if match_type != ImagePlan.MATCH_INST_VERSIONS: - # no point for installed pkgs.... 
- for pkg_name in proposed_dict: - pubs_found = set([ - f.publisher - for f in proposed_dict[pkg_name] - ]) - # 1000 is hack for installed but unconfigured - # publishers - best_pub = sorted([ - (pub_ranks.get(p, (1000, True))[0], p) - for p in pubs_found - ])[0][1] - - # Include any installed FMRIs that were allowed - # by all of the previous filtering, even if they - # aren't from the "best" available publisher, to - # account for the scenario where the installed - # version is the newest or best version for the - # plan solution. While doing so, also eliminate - # any exact duplicate FMRIs from publishers - # other than the installed publisher to prevent - # thrashing in the solver due to many equiv. - # solutions and unexpected changes in package - # publishers that weren't explicitly requested. - inst_f = installed_pkgs.get(f.pkg_name, None) - inst_v = None - if inst_f not in proposed_dict[pkg_name]: - # Should only apply if it's part of the - # proposed set. - inst_f = None - else: - inst_v = inst_f.version - - proposed_dict[pkg_name] = [ - f for f in proposed_dict[pkg_name] - if f == inst_f or \ - (f.publisher == best_pub and - f.version != inst_v) - ] - - # construct references so that we can know which pattern - # generated which fmris... - references = dict([ + inst_v = inst_f.version + + proposed_dict[pkg_name] = [ + f + for f in proposed_dict[pkg_name] + if f == inst_f + or (f.publisher == best_pub and f.version != inst_v) + ] + + # construct references so that we can know which pattern + # generated which fmris... + references = dict( + [ + (f, p) + for p in ret.keys() + for flist in ret[p].values() + for f in flist + if f in proposed_dict[f.pkg_name] + ] + ) + + # Discard all but the newest version of each match. + if latest_pats: + # Rebuild proposed_dict based on latest version of every + # package. + sort_key = operator.attrgetter("version") + for pname, flist in six.iteritems(proposed_dict): + # Must sort on version; sorting by FMRI would + # sort by publisher, then by version which is + # not desirable. + platest = sorted(flist, key=sort_key)[-1] + if references[platest] not in latest_pats: + # Nothing to do. + continue + + # Filter out all versions except the latest for + # each matching package. Allow for multiple + # FMRIs of the same latest version. (There + # might be more than one publisher with the + # same version.) + proposed_dict[pname] = [ + f for f in flist if f.version == platest.version + ] + + # Construct references again to match final state + # of proposed_dict. + references = dict( + [ (f, p) for p in ret.keys() for flist in ret[p].values() for f in flist if f in proposed_dict[f.pkg_name] - ]) - - # Discard all but the newest version of each match. - if latest_pats: - # Rebuild proposed_dict based on latest version of every - # package. - sort_key = operator.attrgetter("version") - for pname, flist in six.iteritems(proposed_dict): - # Must sort on version; sorting by FMRI would - # sort by publisher, then by version which is - # not desirable. - platest = sorted(flist, key=sort_key)[-1] - if references[platest] not in latest_pats: - # Nothing to do. - continue - - # Filter out all versions except the latest for - # each matching package. Allow for multiple - # FMRIs of the same latest version. (There - # might be more than one publisher with the - # same version.) - proposed_dict[pname] = [ - f for f in flist - if f.version == platest.version - ] - - # Construct references again to match final state - # of proposed_dict. 
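
# A compact sketch of the publisher-ranking selection above (publisher names
# and ranks are hypothetical): the publisher with the lowest rank wins, and
# publishers that are installed but not configured fall back to the 1000
# sentinel mentioned in the comment.
pub_ranks = {"omnios": (1, True), "extra": (2, True)}   # name -> (rank, sticky)
pubs_found = {"extra", "omnios", "legacy"}              # "legacy" is unconfigured
best_pub = sorted(
    (pub_ranks.get(p, (1000, True))[0], p) for p in pubs_found
)[0][1]
print(best_pub)  # omnios
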
- references = dict([ - (f, p) - for p in ret.keys() - for flist in ret[p].values() - for f in flist - if f in proposed_dict[f.pkg_name] - ]) - - return proposed_dict, references - - @staticmethod - def freeze_pkgs_match(image, pats): - """Find the packages which match the given patterns and thus - should be frozen.""" - - pats = set(pats) - freezes = set() - pub_ranks = image.get_publisher_ranks() - installed_version_mismatches = {} - versionless_uninstalled = set() - multiversions = [] - - # Find the installed packages that match the provided patterns. - inst_dict, references = ImagePlan.__match_user_fmris(image, - pats, ImagePlan.MATCH_INST_VERSIONS, pub_ranks=pub_ranks, - raise_not_installed=False) - - # Find the installed package stems that match the provided - # patterns. - installed_stems_dict = ImagePlan.match_user_stems(image, pats, - ImagePlan.MATCH_INST_VERSIONS, raise_unmatched=False, - raise_not_installed=False, return_matchdict=True) - - stems_of_fmri_matches = set(inst_dict.keys()) - stems_of_stems_matches = set(installed_stems_dict.keys()) - - assert stems_of_fmri_matches.issubset(stems_of_stems_matches) - - # For each package stem which matched a pattern only when - # versions were ignored ... - for stem in stems_of_stems_matches - stems_of_fmri_matches: - # If more than one pattern matched this stem, then - # match_user_stems should've raised an exception. - assert len(installed_stems_dict[stem]) == 1 - bad_pat = installed_stems_dict[stem][0] - installed_version_mismatches.setdefault( - bad_pat, []).append(stem) - # If this pattern is bad, then we don't care about it - # anymore. - pats.discard(bad_pat) - - # For each fmri, pattern where the pattern matched the fmri - # including the version ... - for full_fmri, pat in six.iteritems(references): - parts = pat.split("@", 1) - # If the pattern doesn't include a version, then add the - # version the package is installed at to the list of - # things to freeze. If it does include a version, then - # just freeze using the version from the pattern, and - # the name from the matching fmri. - if len(parts) < 2 or parts[1] == "": - freezes.add(full_fmri.get_fmri(anarchy=True, - include_scheme=False)) - else: - freezes.add(full_fmri.pkg_name + "@" + parts[1]) - # We're done with this pattern now. - pats.discard(pat) - - # Any wildcarded patterns remaining matched no installed - # packages and so are invalid arguments to freeze. - unmatched_wildcards = set([ - pat for pat in pats if "*" in pat or "?" in pat - ]) - pats -= unmatched_wildcards - - # Now check the remaining pats to ensure they have a version - # component. If they don't, then they can't be used to freeze - # uninstalled packages. - for pat in pats: - parts = pat.split("@", 1) - if len(parts) < 2 or parts[1] == "": - versionless_uninstalled.add(pat) - pats -= versionless_uninstalled - freezes |= pats - - stems = {} - for p in freezes: - stems.setdefault(pkg.fmri.PkgFmri(p).get_pkg_stem( - anarchy=True, include_scheme=False), set()).add(p) - # Check whether one stem has been frozen at non-identical - # versions. 
- for k, v in six.iteritems(stems): - if len(v) > 1: - multiversions.append((k, v)) - else: - stems[k] = v.pop() - - if versionless_uninstalled or unmatched_wildcards or \ - installed_version_mismatches or multiversions: - raise api_errors.FreezePkgsException( - multiversions=multiversions, - unmatched_wildcards=unmatched_wildcards, - version_mismatch=installed_version_mismatches, - versionless_uninstalled=versionless_uninstalled) - return stems + ] + ) + + return proposed_dict, references + + @staticmethod + def freeze_pkgs_match(image, pats): + """Find the packages which match the given patterns and thus + should be frozen.""" + + pats = set(pats) + freezes = set() + pub_ranks = image.get_publisher_ranks() + installed_version_mismatches = {} + versionless_uninstalled = set() + multiversions = [] + + # Find the installed packages that match the provided patterns. + inst_dict, references = ImagePlan.__match_user_fmris( + image, + pats, + ImagePlan.MATCH_INST_VERSIONS, + pub_ranks=pub_ranks, + raise_not_installed=False, + ) + + # Find the installed package stems that match the provided + # patterns. + installed_stems_dict = ImagePlan.match_user_stems( + image, + pats, + ImagePlan.MATCH_INST_VERSIONS, + raise_unmatched=False, + raise_not_installed=False, + return_matchdict=True, + ) + + stems_of_fmri_matches = set(inst_dict.keys()) + stems_of_stems_matches = set(installed_stems_dict.keys()) + + assert stems_of_fmri_matches.issubset(stems_of_stems_matches) + + # For each package stem which matched a pattern only when + # versions were ignored ... + for stem in stems_of_stems_matches - stems_of_fmri_matches: + # If more than one pattern matched this stem, then + # match_user_stems should've raised an exception. + assert len(installed_stems_dict[stem]) == 1 + bad_pat = installed_stems_dict[stem][0] + installed_version_mismatches.setdefault(bad_pat, []).append(stem) + # If this pattern is bad, then we don't care about it + # anymore. + pats.discard(bad_pat) + + # For each fmri, pattern where the pattern matched the fmri + # including the version ... + for full_fmri, pat in six.iteritems(references): + parts = pat.split("@", 1) + # If the pattern doesn't include a version, then add the + # version the package is installed at to the list of + # things to freeze. If it does include a version, then + # just freeze using the version from the pattern, and + # the name from the matching fmri. + if len(parts) < 2 or parts[1] == "": + freezes.add( + full_fmri.get_fmri(anarchy=True, include_scheme=False) + ) + else: + freezes.add(full_fmri.pkg_name + "@" + parts[1]) + # We're done with this pattern now. + pats.discard(pat) + + # Any wildcarded patterns remaining matched no installed + # packages and so are invalid arguments to freeze. + unmatched_wildcards = set( + [pat for pat in pats if "*" in pat or "?" in pat] + ) + pats -= unmatched_wildcards + + # Now check the remaining pats to ensure they have a version + # component. If they don't, then they can't be used to freeze + # uninstalled packages. + for pat in pats: + parts = pat.split("@", 1) + if len(parts) < 2 or parts[1] == "": + versionless_uninstalled.add(pat) + pats -= versionless_uninstalled + freezes |= pats + + stems = {} + for p in freezes: + stems.setdefault( + pkg.fmri.PkgFmri(p).get_pkg_stem( + anarchy=True, include_scheme=False + ), + set(), + ).add(p) + # Check whether one stem has been frozen at non-identical + # versions. 
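
# A minimal sketch of the freeze version rule above, using plain FMRI strings
# (hypothetical names, not pkg.fmri.PkgFmri): a pattern without "@<version>"
# freezes at the installed version, otherwise the version from the pattern is
# combined with the matched package name.
def freeze_entry(pattern, installed_fmri):
    stem = installed_fmri.split("@", 1)[0]
    parts = pattern.split("@", 1)
    if len(parts) < 2 or parts[1] == "":
        return installed_fmri          # freeze at the installed version
    return stem + "@" + parts[1]       # freeze at the requested version

print(freeze_entry("web/server", "web/server@1.1"))    # web/server@1.1
print(freeze_entry("web/server@2", "web/server@1.1"))  # web/server@2
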
+ for k, v in six.iteritems(stems): + if len(v) > 1: + multiversions.append((k, v)) + else: + stems[k] = v.pop() + + if ( + versionless_uninstalled + or unmatched_wildcards + or installed_version_mismatches + or multiversions + ): + raise api_errors.FreezePkgsException( + multiversions=multiversions, + unmatched_wildcards=unmatched_wildcards, + version_mismatch=installed_version_mismatches, + versionless_uninstalled=versionless_uninstalled, + ) + return stems + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/imagetypes.py b/src/modules/client/imagetypes.py index 8a960aeed..a9f6515ec 100644 --- a/src/modules/client/imagetypes.py +++ b/src/modules/client/imagetypes.py @@ -32,9 +32,9 @@ IMG_NONE: "none", IMG_ENTIRE: "full", IMG_PARTIAL: "partial", - IMG_USER: "user" + IMG_USER: "user", } # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/indexer.py b/src/modules/client/indexer.py index 6215ed784..4465426da 100644 --- a/src/modules/client/indexer.py +++ b/src/modules/client/indexer.py @@ -34,39 +34,55 @@ import pkg.search_storage as ss from pkg.misc import EmptyI + class Indexer(indexer.Indexer): - def __init__(self, image, get_manf_func, get_manifest_path, - progtrack=None, excludes=EmptyI): - indexer.Indexer.__init__(self, image.index_dir, get_manf_func, - get_manifest_path, progtrack, excludes) - self.image = image - self._data_dict['full_fmri_hash'] = \ - ss.IndexStoreSetHash('full_fmri_list.hash') - self._data_full_fmri_hash = self._data_dict['full_fmri_hash'] + def __init__( + self, + image, + get_manf_func, + get_manifest_path, + progtrack=None, + excludes=EmptyI, + ): + indexer.Indexer.__init__( + self, + image.index_dir, + get_manf_func, + get_manifest_path, + progtrack, + excludes, + ) + self.image = image + self._data_dict["full_fmri_hash"] = ss.IndexStoreSetHash( + "full_fmri_list.hash" + ) + self._data_full_fmri_hash = self._data_dict["full_fmri_hash"] + + def _write_assistant_dicts(self, out_dir): + """Gives the full_fmri hash object the data it needs before + the superclass is called to write out the dictionaries. + """ + self._data_full_fmri_hash.set_hash(self._data_full_fmri.get_set()) + indexer.Indexer._write_assistant_dicts(self, out_dir) - def _write_assistant_dicts(self, out_dir): - """Gives the full_fmri hash object the data it needs before - the superclass is called to write out the dictionaries. - """ - self._data_full_fmri_hash.set_hash( - self._data_full_fmri.get_set()) - indexer.Indexer._write_assistant_dicts(self, out_dir) + def check_index_has_exactly_fmris(self, fmri_names): + """Checks to see if the fmris given are the ones indexed.""" + try: + res = ss.consistent_open( + self._data_dict.values(), + self._index_dir, + self._file_timeout_secs, + ) + if ( + res is not None + and not self._data_full_fmri_hash.check_against_file(fmri_names) + ): + res = None + finally: + for d in self._data_dict.values(): + d.close_file_handle() + return res is not None - def check_index_has_exactly_fmris(self, fmri_names): - """Checks to see if the fmris given are the ones indexed. 
- """ - try: - res = \ - ss.consistent_open(self._data_dict.values(), - self._index_dir, self._file_timeout_secs) - if res is not None and \ - not self._data_full_fmri_hash.check_against_file( - fmri_names): - res = None - finally: - for d in self._data_dict.values(): - d.close_file_handle() - return res is not None # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/linkedimage/__init__.py b/src/modules/client/linkedimage/__init__.py index 1b36fcdc9..9481f1eed 100644 --- a/src/modules/client/linkedimage/__init__.py +++ b/src/modules/client/linkedimage/__init__.py @@ -36,10 +36,10 @@ import inspect # import linked image common code -from pkg.client.linkedimage.common import * # pylint: disable=W0401, W0622 +from pkg.client.linkedimage.common import * # pylint: disable=W0401, W0622 # names of linked image plugins -p_types = [ "zone", "system" ] +p_types = ["zone", "system"] # map of plugin names to their associated LinkedImagePlugin derived class p_classes = {} @@ -52,29 +52,33 @@ # initialize p_classes and p_classes_child for _modname in p_types: - _module = __import__("{0}.{1}".format(__name__, _modname), - globals(), locals(), [_modname]) + _module = __import__( + "{0}.{1}".format(__name__, _modname), globals(), locals(), [_modname] + ) - # Find all the classes actually defined in this module. - _nvlist = inspect.getmembers(_module, inspect.isclass) - _classes = [ - _i[1] - for _i in _nvlist - if _i[1].__module__ == ("{0}.{1}".format(__name__, _modname)) - ] + # Find all the classes actually defined in this module. + _nvlist = inspect.getmembers(_module, inspect.isclass) + _classes = [ + _i[1] + for _i in _nvlist + if _i[1].__module__ == ("{0}.{1}".format(__name__, _modname)) + ] - for _i in _classes: - if LinkedImagePlugin in inspect.getmro(_i): - p_classes[_modname] = _i - elif LinkedImageChildPlugin in inspect.getmro(_i): - p_classes_child[_modname] = _i - else: - raise RuntimeError(""" + for _i in _classes: + if LinkedImagePlugin in inspect.getmro(_i): + p_classes[_modname] = _i + elif LinkedImageChildPlugin in inspect.getmro(_i): + p_classes_child[_modname] = _i + else: + raise RuntimeError( + """ Invalid linked image plugin class '{0}' for plugin '{1}'""".format( - _i.__name__, _modname)) + _i.__name__, _modname + ) + ) # Clean up temporary variables del _modname, _module, _nvlist, _classes, _i # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/linkedimage/common.py b/src/modules/client/linkedimage/common.py index f416cbca6..5aec41525 100644 --- a/src/modules/client/linkedimage/common.py +++ b/src/modules/client/linkedimage/common.py @@ -80,35 +80,39 @@ # linked image relationship types (returned by LinkedImage.list_related()) REL_PARENT = "parent" -REL_SELF = "self" -REL_CHILD = "child" +REL_SELF = "self" +REL_CHILD = "child" # linked image properties PROP_CURRENT_PARENT_PATH = "li-current-parent" -PROP_CURRENT_PATH = "li-current-path" -PROP_MODEL = "li-model" -PROP_NAME = "li-name" -PROP_PARENT_PATH = "li-parent" -PROP_PATH = "li-path" -PROP_PATH_TRANSFORM = "li-path-transform" -PROP_RECURSE = "li-recurse" -prop_values = frozenset([ - PROP_CURRENT_PARENT_PATH, - PROP_CURRENT_PATH, - PROP_MODEL, - PROP_NAME, - PROP_PARENT_PATH, - PROP_PATH, - PROP_PATH_TRANSFORM, - PROP_RECURSE, -]) +PROP_CURRENT_PATH = "li-current-path" +PROP_MODEL = "li-model" +PROP_NAME = "li-name" +PROP_PARENT_PATH = "li-parent" +PROP_PATH = "li-path" +PROP_PATH_TRANSFORM = "li-path-transform" 
+PROP_RECURSE = "li-recurse" +prop_values = frozenset( + [ + PROP_CURRENT_PARENT_PATH, + PROP_CURRENT_PATH, + PROP_MODEL, + PROP_NAME, + PROP_PARENT_PATH, + PROP_PATH, + PROP_PATH_TRANSFORM, + PROP_RECURSE, + ] +) # properties that never get saved -temporal_props = frozenset([ - PROP_CURRENT_PARENT_PATH, - PROP_CURRENT_PATH, - PROP_PATH_TRANSFORM, -]) +temporal_props = frozenset( + [ + PROP_CURRENT_PARENT_PATH, + PROP_CURRENT_PATH, + PROP_PATH_TRANSFORM, + ] +) # special linked image name values (PROP_NAME) PV_NAME_NONE = "-" @@ -116,17 +120,19 @@ # linked image model values (PROP_MODEL) PV_MODEL_PUSH = "push" PV_MODEL_PULL = "pull" -model_values = frozenset([ - PV_MODEL_PUSH, - PV_MODEL_PULL, -]) +model_values = frozenset( + [ + PV_MODEL_PUSH, + PV_MODEL_PULL, + ] +) # files which contain linked image data -__DATA_DIR = "linked" -PATH_PFACETS = os.path.join(__DATA_DIR, "linked_pfacets") -PATH_PPKGS = os.path.join(__DATA_DIR, "linked_ppkgs") -PATH_PROP = os.path.join(__DATA_DIR, "linked_prop") -PATH_PUBS = os.path.join(__DATA_DIR, "linked_ppubs") +__DATA_DIR = "linked" +PATH_PFACETS = os.path.join(__DATA_DIR, "linked_pfacets") +PATH_PPKGS = os.path.join(__DATA_DIR, "linked_ppkgs") +PATH_PROP = os.path.join(__DATA_DIR, "linked_prop") +PATH_PUBS = os.path.join(__DATA_DIR, "linked_ppubs") # # we define PATH_TRANSFORM_NONE as a tuple instead of just None because this @@ -136,3788 +142,3943 @@ LI_RVTuple = collections.namedtuple("LI_RVTuple", "rvt_rv rvt_e rvt_p_dict") -def _li_rvtuple_check(rvtuple): - """Sanity check a linked image operation return value tuple. - The format of said tuple is: - process return code - LinkedImageException exception (optional) - json dictionary containing planned image changes - """ - - # make sure we're using the LI_RVTuple class - assert type(rvtuple) == LI_RVTuple - # decode the tuple - rv, e, p_dict = rvtuple +def _li_rvtuple_check(rvtuple): + """Sanity check a linked image operation return value tuple. 
+ The format of said tuple is: + process return code + LinkedImageException exception (optional) + json dictionary containing planned image changes + """ + + # make sure we're using the LI_RVTuple class + assert type(rvtuple) == LI_RVTuple + + # decode the tuple + rv, e, p_dict = rvtuple + + # rv must be an integer + assert type(rv) == int + # any exception returned must be a LinkedImageException + assert e is None or type(e) == apx.LinkedImageException + # if specified, p_dict must be a dictionary + assert p_dict is None or type(p_dict) is dict + # some child return codes should never be associated with an exception + assert rv not in [pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP] or e is None + # a p_dict can only be returned if the child returned EXIT_OK + assert rv == pkgdefs.EXIT_OK or p_dict is None + + # return the value that was passed in + return rvtuple - # rv must be an integer - assert type(rv) == int - # any exception returned must be a LinkedImageException - assert e is None or type(e) == apx.LinkedImageException - # if specified, p_dict must be a dictionary - assert p_dict is None or type(p_dict) is dict - # some child return codes should never be associated with an exception - assert rv not in [pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP] or e is None - # a p_dict can only be returned if the child returned EXIT_OK - assert rv == pkgdefs.EXIT_OK or p_dict is None - - # return the value that was passed in - return rvtuple def _li_rvdict_check(rvdict): - """Given a linked image return value dictionary, sanity check all the - entries.""" + """Given a linked image return value dictionary, sanity check all the + entries.""" - assert type(rvdict) == dict - for k, v in six.iteritems(rvdict): - assert type(k) == LinkedImageName, \ - ("Unexpected rvdict key: ", k) - _li_rvtuple_check(v) + assert type(rvdict) == dict + for k, v in six.iteritems(rvdict): + assert type(k) == LinkedImageName, ("Unexpected rvdict key: ", k) + _li_rvtuple_check(v) + + # return the value that was passed in + return rvdict - # return the value that was passed in - return rvdict def _li_rvdict_exceptions(rvdict): - """Given a linked image return value dictionary, return a list of any - exceptions that were encountered while processing children.""" + """Given a linked image return value dictionary, return a list of any + exceptions that were encountered while processing children.""" - # sanity check rvdict - _li_rvdict_check(rvdict) + # sanity check rvdict + _li_rvdict_check(rvdict) + + # get a list of exceptions + return [ + rvtuple.rvt_e + for rvtuple in rvdict.values() + if rvtuple.rvt_e is not None + ] - # get a list of exceptions - return [ - rvtuple.rvt_e - for rvtuple in rvdict.values() - if rvtuple.rvt_e is not None - ] def _li_rvdict_raise_exceptions(rvdict): - """If an exception was encountered while operating on a linked - child then raise that exception. If multiple exceptions were - encountered while operating on multiple children, then bundle - those exceptions together and raise them.""" + """If an exception was encountered while operating on a linked + child then raise that exception. 
If multiple exceptions were + encountered while operating on multiple children, then bundle + those exceptions together and raise them.""" - # get a list of exceptions - exceptions = _li_rvdict_exceptions(rvdict) + # get a list of exceptions + exceptions = _li_rvdict_exceptions(rvdict) - if len(exceptions) == 1: - # one exception encountered - raise exceptions[0] + if len(exceptions) == 1: + # one exception encountered + raise exceptions[0] + + if exceptions: + # multiple exceptions encountered + raise apx.LinkedImageException(bundle=exceptions) - if exceptions: - # multiple exceptions encountered - raise apx.LinkedImageException(bundle=exceptions) class LinkedImagePlugin(object): - """This class is a template that all linked image plugins should - inherit from. Linked image plugins derived from this class are - designed to manage linked aspects of the current image (vs managing - linked aspects of a specific child of the current image). + """This class is a template that all linked image plugins should + inherit from. Linked image plugins derived from this class are + designed to manage linked aspects of the current image (vs managing + linked aspects of a specific child of the current image). - All the interfaces exported by this class and its descendants are - private to the linked image subsystem and should not be called - directly by any other subsystem.""" + All the interfaces exported by this class and its descendants are + private to the linked image subsystem and should not be called + directly by any other subsystem.""" - # functionality flags - support_attach = False - support_detach = False + # functionality flags + support_attach = False + support_detach = False - # Unused argument; pylint: disable=W0613 - def __init__(self, pname, linked): - """Initialize a linked image plugin. + # Unused argument; pylint: disable=W0613 + def __init__(self, pname, linked): + """Initialize a linked image plugin. - 'pname' is the name of the plugin class derived from this - base class. + 'pname' is the name of the plugin class derived from this + base class. - 'linked' is the LinkedImage object initializing this plugin. - """ + 'linked' is the LinkedImage object initializing this plugin. + """ - return + return - def init_root(self, root): - """Called when the path to the image that we're operating on - is changing. This normally occurs when we clone an image - after we've planned and prepared to do an operation.""" + def init_root(self, root): + """Called when the path to the image that we're operating on + is changing. 
This normally occurs when we clone an image + after we've planned and prepared to do an operation.""" - # return value: None - raise NotImplementedError + # return value: None + raise NotImplementedError - def guess_path_transform(self, ignore_errors=False): - """If the linked image plugin is able to detect that we're - operating on an image in an alternate root then return an - transform that can be used to translate between the original - image path and the current one.""" + def guess_path_transform(self, ignore_errors=False): + """If the linked image plugin is able to detect that we're + operating on an image in an alternate root then return an + transform that can be used to translate between the original + image path and the current one.""" - # return value: string or None - raise NotImplementedError + # return value: string or None + raise NotImplementedError - def get_child_list(self, nocache=False, ignore_errors=False): - """Return a list of the child images and paths associated with - the current image. The paths that are returned should be - absolute paths to the original child image locations.""" + def get_child_list(self, nocache=False, ignore_errors=False): + """Return a list of the child images and paths associated with + the current image. The paths that are returned should be + absolute paths to the original child image locations.""" - # return value: list - raise NotImplementedError + # return value: list + raise NotImplementedError - def get_child_props(self, lin): - """Get the linked image properties associated with the - specified child image.""" + def get_child_props(self, lin): + """Get the linked image properties associated with the + specified child image.""" - # return value: dict - raise NotImplementedError + # return value: dict + raise NotImplementedError - def attach_child_inmemory(self, props, allow_relink): - """Attach the specified child image. This operation should - only affect in-memory state of the current image. It should - not update any persistent on-disk linked image state or access - the child image in any way. This routine should assume that - the linked image properties have already been validated.""" + def attach_child_inmemory(self, props, allow_relink): + """Attach the specified child image. This operation should + only affect in-memory state of the current image. It should + not update any persistent on-disk linked image state or access + the child image in any way. This routine should assume that + the linked image properties have already been validated.""" - # return value: None - raise NotImplementedError + # return value: None + raise NotImplementedError - def detach_child_inmemory(self, lin): - """Detach the specified child image. This operation should - only affect in-memory state of the current image. It should - not update any persistent on-disk linked image state or access - the child image in any way.""" + def detach_child_inmemory(self, lin): + """Detach the specified child image. This operation should + only affect in-memory state of the current image. 
It should + not update any persistent on-disk linked image state or access + the child image in any way.""" - # return value: None - raise NotImplementedError + # return value: None + raise NotImplementedError - def sync_children_todisk(self): - """Sync out the in-memory linked image state of this image to - disk.""" + def sync_children_todisk(self): + """Sync out the in-memory linked image state of this image to + disk.""" - # return value: LI_RVTuple() - raise NotImplementedError + # return value: LI_RVTuple() + raise NotImplementedError class LinkedImageChildPlugin(object): - """This class is a template that all linked image child plugins should - inherit from. Linked image child plugins derived from this class are - designed to manage linked aspects of children of the current image. - (vs managing linked aspects of the current image itself). + """This class is a template that all linked image child plugins should + inherit from. Linked image child plugins derived from this class are + designed to manage linked aspects of children of the current image. + (vs managing linked aspects of the current image itself). - All the interfaces exported by this class and its descendants are - private to the linked image subsystem and should not be called - directly by any other subsystem.""" + All the interfaces exported by this class and its descendants are + private to the linked image subsystem and should not be called + directly by any other subsystem.""" - def __init__(self, lic): # Unused argument; pylint: disable=W0613 - """Initialize a linked image child plugin. + def __init__(self, lic): # Unused argument; pylint: disable=W0613 + """Initialize a linked image child plugin. - 'lic' is the LinkedImageChild object initializing this plugin. - """ + 'lic' is the LinkedImageChild object initializing this plugin. + """ - return + return - def munge_props(self, props): - """Called before a parent image saves linked image properties - into a child image. Gives the linked image child plugin a - chance to update the properties that will be saved within the - child image.""" + def munge_props(self, props): + """Called before a parent image saves linked image properties + into a child image. Gives the linked image child plugin a + chance to update the properties that will be saved within the + child image.""" - # return value: None - raise NotImplementedError + # return value: None + raise NotImplementedError class LinkedImageName(object): - """A class for naming child linked images. Linked image names are - used for all child images (and only child images), and they encode two - pieces of information. The name of the plugin used to manage the - image and a linked image name. Linked image names have the following - format ":""" + """A class for naming child linked images. Linked image names are + used for all child images (and only child images), and they encode two + pieces of information. The name of the plugin used to manage the + image and a linked image name. 
Linked image names have the following + format ":""" - def __init__(self, name): - assert type(name) == str + def __init__(self, name): + assert type(name) == str - self.lin_type = self.lin_name = None + self.lin_type = self.lin_name = None + + try: + self.lin_type, self.lin_name = name.split(":") + except ValueError: + raise apx.LinkedImageException(lin_malformed=name) + + if len(self.lin_type) == 0 or len(self.lin_name) == 0: + raise apx.LinkedImageException(lin_malformed=name) + + if self.lin_type not in pkg.client.linkedimage.p_types: + raise apx.LinkedImageException(lin_malformed=name) + + @staticmethod + def getstate(obj, je_state=None): + """Returns the serialized state of this object in a format + that that can be easily stored using JSON, pickle, etc.""" + # Unused argument; pylint: disable=W0613 + return str(obj) + + @staticmethod + def fromstate(state, jd_state=None): + """Allocate a new object using previously serialized state + obtained via getstate().""" + # Unused argument; pylint: disable=W0613 + return LinkedImageName(state) + + def __str__(self): + return "{0}:{1}".format(self.lin_type, self.lin_name) + + def __len__(self): + return len(self.__str__()) + + def __lt__(self, other): + assert type(self) == LinkedImageName + if not other: + return False + if other == PV_NAME_NONE: + return False + assert type(other) == LinkedImageName + if self.lin_type < other.lin_type: + return True + if self.lin_type != other.lin_type: + return False + return self.lin_name < other.lin_name + + def __gt__(self, other): + assert type(self) == LinkedImageName + if not other: + return True + if other == PV_NAME_NONE: + return True + assert type(other) == LinkedImageName + if self.lin_type > other.lin_type: + return True + if self.lin_type != other.lin_type: + return False + return self.lin_name > other.lin_name + + def __le__(self, other): + return not self > other + + def __ge__(self, other): + return not self < other + + def __hash__(self): + return hash(str(self)) + + def __eq__(self, other): + if not isinstance(other, LinkedImageName): + return False + + return str(self) == str(other) + + def __ne__(self, other): + return not self.__eq__(other) - try: - self.lin_type, self.lin_name = name.split(":") - except ValueError: - raise apx.LinkedImageException(lin_malformed=name) - - if len(self.lin_type) == 0 or len(self.lin_name) == 0 : - raise apx.LinkedImageException(lin_malformed=name) - - if self.lin_type not in pkg.client.linkedimage.p_types: - raise apx.LinkedImageException(lin_malformed=name) - - @staticmethod - def getstate(obj, je_state=None): - """Returns the serialized state of this object in a format - that that can be easily stored using JSON, pickle, etc.""" - # Unused argument; pylint: disable=W0613 - return str(obj) - - @staticmethod - def fromstate(state, jd_state=None): - """Allocate a new object using previously serialized state - obtained via getstate().""" - # Unused argument; pylint: disable=W0613 - return LinkedImageName(state) - - def __str__(self): - return "{0}:{1}".format(self.lin_type, self.lin_name) - - def __len__(self): - return len(self.__str__()) - - def __lt__(self, other): - assert type(self) == LinkedImageName - if not other: - return False - if other == PV_NAME_NONE: - return False - assert type(other) == LinkedImageName - if self.lin_type < other.lin_type: - return True - if self.lin_type != other.lin_type: - return False - return self.lin_name < other.lin_name - - def __gt__(self, other): - assert type(self) == LinkedImageName - if not other: - return True 
- if other == PV_NAME_NONE: - return True - assert type(other) == LinkedImageName - if self.lin_type > other.lin_type: - return True - if self.lin_type != other.lin_type: - return False - return self.lin_name > other.lin_name - - def __le__(self, other): - return not self > other - - def __ge__(self, other): - return not self < other - - def __hash__(self): - return hash(str(self)) - - def __eq__(self, other): - if not isinstance(other, LinkedImageName): - return False - - return str(self) == str(other) - - def __ne__(self, other): - return not self.__eq__(other) class LinkedImage(object): - """A LinkedImage object is used to manage the linked image aspects of - an image. This image could be a child image, a parent image, or both - a parent and child. This object allows for access to linked image - properties and also provides routines that allow operations to be - performed on child images.""" - - # Properties that a parent image with push children should save locally. - __parent_props = frozenset([ - PROP_PATH - ]) - - # Properties that a pull child image should save locally. - __pull_child_props = frozenset([ + """A LinkedImage object is used to manage the linked image aspects of + an image. This image could be a child image, a parent image, or both + a parent and child. This object allows for access to linked image + properties and also provides routines that allow operations to be + performed on child images.""" + + # Properties that a parent image with push children should save locally. + __parent_props = frozenset([PROP_PATH]) + + # Properties that a pull child image should save locally. + __pull_child_props = frozenset( + [ PROP_NAME, PROP_PATH, PROP_MODEL, PROP_PARENT_PATH, - ]) + ] + ) - # Properties that a parent image with push children should save in - # those children. - __push_child_props = frozenset([ + # Properties that a parent image with push children should save in + # those children. + __push_child_props = frozenset( + [ PROP_NAME, PROP_PATH, PROP_MODEL, PROP_RECURSE, - ]) - - # make sure there is no invalid overlap - assert not (temporal_props & ( - __parent_props | - __pull_child_props | - __push_child_props)) - - def __init__(self, img): - """Initialize a new LinkedImage object.""" - - # globals - self.__img = img - - # variables reset by self.__update_props() - self.__props = dict() - self.__ppkgs = frozenset() - self.__ppubs = None - self.__pfacets = pkg.facet.Facets() - self.__pimg = None - - # variables reset by self.__recursion_init() - self.__lic_ignore = None - self.__lic_dict = {} - - # variables reset by self._init_root() - self.__root = None - self.__path_ppkgs = None - self.__path_prop = None - self.__path_ppubs = None - self.__path_pfacets = None - self.__img_insync = True - - # initialize with no properties - self.__update_props() - - # initialize linked image plugin objects - self.__plugins = dict() - for p in pkg.client.linkedimage.p_types: - self.__plugins[p] = \ - pkg.client.linkedimage.p_classes[p](p, self) - - # if the image has a path setup, we can load data from it. - if self.__img.imgdir: - self._init_root() - - @property - def image(self): - """Get a pointer to the image object associated with this - linked image object.""" - return self.__img - - def _init_root(self): - """Called during object initialization and by - image.py`__set_root() to let us know when we're changing the - root location of the image. (The only time we change the root - path is when changes BEs during operations which clone BEs. 
- So when this happens most our metadata shouldn't actually - change.""" - - assert self.__img.root, \ - "root = {0}".format(str(self.__img.root)) - assert self.__img.imgdir, \ - "imgdir = {0}".format(str(self.__img.imgdir)) - - # Check if this is our first time accessing the current image - # or if we're just re-initializing ourselves. - first_pass = self.__root is None - - # figure out the new root image path - root = self.__img.root.rstrip(os.sep) + os.sep - - # initialize paths for linked image data files - self.__root = root - imgdir = self.__img.imgdir.rstrip(os.sep) + os.sep - self.__path_ppkgs = os.path.join(imgdir, PATH_PPKGS) - self.__path_prop = os.path.join(imgdir, PATH_PROP) - self.__path_ppubs = os.path.join(imgdir, PATH_PUBS) - self.__path_pfacets = os.path.join(imgdir, PATH_PFACETS) - - # if this isn't a reset, then load data from the image - if first_pass: - # the first time around we load non-temporary data (if - # there is any) so that we can audit ourselves and see - # if we're in currently in sync. - self.__load(tmp=False) - if self.ischild(): - self.__img_insync = self.__insync() - - # now re-load all the data taking into account any - # temporary new data associated with an in-progress - # operation. - self.__load() - - # if we're not linked we're done - if not self.__props: - return - - # if this is a reset, update temporal properties - if not first_pass: - self.__set_current_path(self.__props, update=True) - - # Tell linked image plugins about the updated paths - # Unused variable 'plugin'; pylint: disable=W0612 - for plugin, lip in six.iteritems(self.__plugins): - # pylint: enable=W0612 - lip.init_root(root) - - # Tell linked image children about the updated paths - for lic in six.itervalues(self.__lic_dict): - lic.child_init_root() - - def __update_props(self, props=None): - """Internal helper routine used when we want to update any - linked image properties. This routine sanity check the - new properties, updates them, and resets any cached state - that is affected by property values.""" - - if props is None: - props = dict() - elif props: - self.__verify_props(props) - - # all temporal properties must exist - for p in temporal_props: - # PROP_CURRENT_PARENT_PATH can only be set if - # we have PROP_PARENT_PATH. - if p is PROP_CURRENT_PARENT_PATH and \ - PROP_PARENT_PATH not in props: - continue - assert p in props, \ - "'{0}' not in {1}".format(p, set(props)) - - # update state - self.__props = props - self.__ppkgs = frozenset() - self.__ppubs = None - self.__pfacets = pkg.facet.Facets() - self.__pimg = None - - def __verify_props(self, props): - """Perform internal consistency checks for a set of linked - image properties. 
Don't update any state.""" - - props_set = set(props) - - # if we're not a child image ourselves, then we're done - if (props_set - temporal_props) == self.__parent_props: - return props - - # make sure PROP_MODEL was specified - if PROP_NAME not in props: - _rterr(path=self.__root, - missing_props=[PROP_NAME]) - - # validate the linked image name - try: - lin = LinkedImageName(str(props[PROP_NAME])) - except apx.LinkedImageException: - _rterr(path=self.__root, - bad_prop=(PROP_NAME, props[PROP_NAME])) - - if lin.lin_type not in self.__plugins: - _rterr(path=self.__root, lin=lin, - bad_lin_type=lin.lin_type) - - # make sure PROP_MODEL was specified - if PROP_MODEL not in props: - _rterr(path=self.__root, lin=lin, - missing_props=[PROP_MODEL]) - - model = props[PROP_MODEL] - if model not in model_values: - _rterr(path=self.__root, lin=lin, - bad_prop=(PROP_MODEL, model)) - - if model == PV_MODEL_PUSH: - missing = self.__push_child_props - props_set - if missing: - _rterr(path=self.__root, lin=lin, - missing_props=missing) - - if model == PV_MODEL_PULL: - missing = self.__pull_child_props - props_set - if missing: - _rterr(path=self.__root, lin=lin, - missing_props=missing) - - @staticmethod - def set_path_transform(props, path_transform, - path=None, current_path=None, update=False): - """Given a new path_transform, update path properties.""" - - if update: - assert (set(props) & temporal_props), \ - "no temporal properties are set: {0}".format(props) - else: - assert not (set(props) & temporal_props), \ - "temporal properties already set: {0}".format(props) - - # Either 'path' or 'current_path' must be specified. - assert path is None or current_path is None - assert path is not None or current_path is not None - - if path is not None: - current_path = path_transform_apply(path, - path_transform) - - elif current_path is not None: - path = path_transform_revert(current_path, - path_transform) - - props[PROP_PATH] = path - props[PROP_CURRENT_PATH] = current_path - props[PROP_PATH_TRANSFORM] = path_transform - parent_path = props.get(PROP_PARENT_PATH) - if not parent_path: - return - - if not path_transform_applicable(parent_path, path_transform): - props[PROP_CURRENT_PARENT_PATH] = parent_path - return - - props[PROP_CURRENT_PARENT_PATH] = path_transform_apply( - parent_path, path_transform) - - def __set_current_path(self, props, update=False): - """Given a set of linked image properties, the image paths - stored within those properties may not match the actual image - paths if we're executing within an alternate root environment. - To deal with this situation we create temporal in-memory - properties that represent the current path to the image, and a - transform that allows us to translate between the current path - and the original path.""" - - current_path = self.__root - path_transform = compute_path_transform(props[PROP_PATH], - current_path) - - self.set_path_transform(props, path_transform, - current_path=current_path, update=update) - - def __guess_path_transform(self, ignore_errors=False): - """If we're initializing parent linked image properties for - the first time (or if those properties somehow got deleted) - then we need to know if the parent image that we're currently - operating on is located within an alternate root. 
One way to - do this is to ask our linked image plugins if they can - determine this (the zones linked image plugin usually can - if the image is a global zone).""" - - # ask each plugin if we're operating in an alternate root - p_transforms = [] - for plugin, lip in six.iteritems(self.__plugins): - p_transform = lip.guess_path_transform( - ignore_errors=ignore_errors) - if p_transform is not PATH_TRANSFORM_NONE: - p_transforms.append((plugin, p_transform)) - - if not p_transforms: - # no transform suggested by plugins - return PATH_TRANSFORM_NONE - - # check for conflicting transforms - transforms = list(set([ - p_transform - # Unused variable; pylint: disable=W0612 - for pname, p_transform in p_transforms - # pylint: enable=W0612 - ])) - - if len(transforms) == 1: - # we have a transform from our plugins - return transforms[0] - - # we have conflicting transforms, time to die - _rterr(li=self, multiple_transforms=p_transforms) - - def __fabricate_parent_props(self, ignore_errors=False): - """Fabricate the minimum set of properties required for a - parent image.""" - - # ask our plugins if we're operating with alternate image paths - path_transform = self.__guess_path_transform( - ignore_errors=ignore_errors) - - props = dict() - self.set_path_transform(props, path_transform, - current_path=self.__root) - return props - - def __load_ondisk_props(self, tmp=True): - """Load linked image properties from disk and return them to - the caller. We sanity check the properties, but we don't - update any internal linked image state. - - 'tmp' determines if we should read/write to the official - linked image metadata files, or if we should access temporary - versions (which have "." appended to them.""" - - path = self.__path_prop - path_tmp = "{0}.{1:d}".format(self.__path_prop, - global_settings.client_runid) - - # read the linked image properties from disk - if tmp and path_exists(path_tmp): - path = path_tmp - props = load_data(path) - elif path_exists(path): - props = load_data(path) - else: - return None - - # make sure there are no saved temporal properties - assert not set(props) & temporal_props - - if PROP_NAME in props: - # convert PROP_NAME into a linked image name obj - name = props[PROP_NAME] - try: - lin = LinkedImageName(name) - props[PROP_NAME] = lin - except apx.LinkedImageException: - _rterr(path=self.__root, - bad_prop=(PROP_NAME, name)) - - # sanity check our properties - self.__verify_props(props) - return props - - def __load_ondisk_pfacets(self, tmp=True): - """Load linked image inherited facets from disk. - Don't update any internal state. - - 'tmp' determines if we should read/write to the official - linked image metadata files, or if we should access temporary - versions (which have "." appended to them.""" - - pfacets = misc.EmptyDict - path = "{0}.{1:d}".format(self.__path_pfacets, - global_settings.client_runid) - if tmp and path_exists(path): - pfacets = load_data(path) - else: - path = self.__path_pfacets - pfacets = load_data(path, missing_ok=True) - - if pfacets is None: - return None - - rv = pkg.facet.Facets() - for k, v in six.iteritems(pfacets): - # W0212 Access to a protected member - # pylint: disable=W0212 - rv._set_inherited(k, v) - return rv - - def __load_ondisk_ppkgs(self, tmp=True): - """Load linked image parent packages from disk. - Don't update any internal state. - - 'tmp' determines if we should read/write to the official - linked image metadata files, or if we should access temporary - versions (which have "." 
appended to them.""" - - fmri_strs = None - path = "{0}.{1:d}".format(self.__path_ppkgs, - global_settings.client_runid) - if tmp and path_exists(path): - fmri_strs = load_data(path) - else: - path = self.__path_ppkgs - fmri_strs = load_data(path, missing_ok=True) - - if fmri_strs is None: - return None - - return frozenset([ - pkg.fmri.PkgFmri(str(s)) - for s in fmri_strs - ]) - - def __load_ondisk_ppubs(self, tmp=True): - """Load linked image parent publishers from disk. - Don't update any internal state. - - 'tmp' determines if we should read/write to the official - linked image metadata files, or if we should access temporary - versions (which have "." appended to them.""" - - ppubs = None - path = "{0}.{1:d}".format(self.__path_ppubs, - global_settings.client_runid) - if tmp and path_exists(path): - ppubs = load_data(path) - else: - path = self.__path_ppubs - ppubs = load_data(path, missing_ok=True) - - return ppubs - - def __load(self, tmp=True): - """Load linked image properties and constraints from disk. - Update the linked image internal state with the loaded data.""" - - # - # Normally, if we're a parent image we'll have linked image - # properties stored on disk. So load those now. - # - # If no properties are loaded, we may still be a parent image - # that is just missing it's metadata. (oops.) We attempt to - # detect this situation by invoking __isparent(), which will - # ask each child if there are any children. This is a best - # effort attempt, so when we do this we ignore any plugin - # runtime errors since we really want Image object - # initialization to succeed. If we don't have any linked - # image metadata, and we're having runtime errors querying for - # children, then we'll allow initialization here, but any - # subsequent operation that tries to access children will fail - # and the caller will have to specify that they want to ignore - # all children to allow the operation to succeed. - # - props = self.__load_ondisk_props(tmp=tmp) - if not props and not self.__isparent(ignore_errors=True): - # we're not linked - return - - if not props: - # - # Oops. We're a parent image with no properties - # stored on disk. Rather than throwing an exception - # try to fabricate up some props with reasonably - # guessed values which the user can subsequently - # change and/or fix. - # - props = self.__fabricate_parent_props( - ignore_errors=True) - else: - self.__set_current_path(props) - - self.__update_props(props) - - if not self.ischild(): - return - - # load parent packages. if parent package data is missing just - # continue along and hope for the best. - ppkgs = self.__load_ondisk_ppkgs(tmp=tmp) - if ppkgs is not None: - self.__ppkgs = ppkgs - - # load inherited facets. if inherited facet data is missing - # just continue along and hope for the best. - pfacets = self.__load_ondisk_pfacets(tmp=tmp) - if pfacets is not None: - self.__pfacets = pfacets - - # load parent publisher data. if publisher data is missing - # continue along and we'll just skip the publisher checks, - # it's better than failing and preventing any image updates. - self.__ppubs = self.__load_ondisk_ppubs(tmp=tmp) - - @staticmethod - def __validate_prop_recurse(v): - """Verify property value for PROP_RECURSE.""" - if v in [True, False]: - return True - if type(v) == str and v.lower() in ["true", "false"]: - return True - return False - - def __validate_attach_props(self, model, props): - """Validate user supplied linked image attach properties. 
- Don't update any internal state.""" - - # make sure that only attach time options have been - # specified, and that they have allowed values. - validate_props = { - PROP_RECURSE: self.__validate_prop_recurse - } - - if model == PV_MODEL_PUSH: - allowed_props = self.__push_child_props - else: - assert model == PV_MODEL_PULL - allowed_props = self.__pull_child_props + ] + ) + + # make sure there is no invalid overlap + assert not ( + temporal_props + & (__parent_props | __pull_child_props | __push_child_props) + ) + + def __init__(self, img): + """Initialize a new LinkedImage object.""" + + # globals + self.__img = img + + # variables reset by self.__update_props() + self.__props = dict() + self.__ppkgs = frozenset() + self.__ppubs = None + self.__pfacets = pkg.facet.Facets() + self.__pimg = None + + # variables reset by self.__recursion_init() + self.__lic_ignore = None + self.__lic_dict = {} + + # variables reset by self._init_root() + self.__root = None + self.__path_ppkgs = None + self.__path_prop = None + self.__path_ppubs = None + self.__path_pfacets = None + self.__img_insync = True + + # initialize with no properties + self.__update_props() + + # initialize linked image plugin objects + self.__plugins = dict() + for p in pkg.client.linkedimage.p_types: + self.__plugins[p] = pkg.client.linkedimage.p_classes[p](p, self) + + # if the image has a path setup, we can load data from it. + if self.__img.imgdir: + self._init_root() + + @property + def image(self): + """Get a pointer to the image object associated with this + linked image object.""" + return self.__img + + def _init_root(self): + """Called during object initialization and by + image.py`__set_root() to let us know when we're changing the + root location of the image. (The only time we change the root + path is when changes BEs during operations which clone BEs. + So when this happens most our metadata shouldn't actually + change.""" + + assert self.__img.root, "root = {0}".format(str(self.__img.root)) + assert self.__img.imgdir, "imgdir = {0}".format(str(self.__img.imgdir)) + + # Check if this is our first time accessing the current image + # or if we're just re-initializing ourselves. + first_pass = self.__root is None + + # figure out the new root image path + root = self.__img.root.rstrip(os.sep) + os.sep + + # initialize paths for linked image data files + self.__root = root + imgdir = self.__img.imgdir.rstrip(os.sep) + os.sep + self.__path_ppkgs = os.path.join(imgdir, PATH_PPKGS) + self.__path_prop = os.path.join(imgdir, PATH_PROP) + self.__path_ppubs = os.path.join(imgdir, PATH_PUBS) + self.__path_pfacets = os.path.join(imgdir, PATH_PFACETS) + + # if this isn't a reset, then load data from the image + if first_pass: + # the first time around we load non-temporary data (if + # there is any) so that we can audit ourselves and see + # if we're in currently in sync. + self.__load(tmp=False) + if self.ischild(): + self.__img_insync = self.__insync() + + # now re-load all the data taking into account any + # temporary new data associated with an in-progress + # operation. 
+ self.__load() + + # if we're not linked we're done + if not self.__props: + return + + # if this is a reset, update temporal properties + if not first_pass: + self.__set_current_path(self.__props, update=True) + + # Tell linked image plugins about the updated paths + # Unused variable 'plugin'; pylint: disable=W0612 + for plugin, lip in six.iteritems(self.__plugins): + # pylint: enable=W0612 + lip.init_root(root) + + # Tell linked image children about the updated paths + for lic in six.itervalues(self.__lic_dict): + lic.child_init_root() + + def __update_props(self, props=None): + """Internal helper routine used when we want to update any + linked image properties. This routine sanity check the + new properties, updates them, and resets any cached state + that is affected by property values.""" + + if props is None: + props = dict() + elif props: + self.__verify_props(props) + + # all temporal properties must exist + for p in temporal_props: + # PROP_CURRENT_PARENT_PATH can only be set if + # we have PROP_PARENT_PATH. + if ( + p is PROP_CURRENT_PARENT_PATH + and PROP_PARENT_PATH not in props + ): + continue + assert p in props, "'{0}' not in {1}".format(p, set(props)) + + # update state + self.__props = props + self.__ppkgs = frozenset() + self.__ppubs = None + self.__pfacets = pkg.facet.Facets() + self.__pimg = None + + def __verify_props(self, props): + """Perform internal consistency checks for a set of linked + image properties. Don't update any state.""" + + props_set = set(props) + + # if we're not a child image ourselves, then we're done + if (props_set - temporal_props) == self.__parent_props: + return props + + # make sure PROP_MODEL was specified + if PROP_NAME not in props: + _rterr(path=self.__root, missing_props=[PROP_NAME]) + + # validate the linked image name + try: + lin = LinkedImageName(str(props[PROP_NAME])) + except apx.LinkedImageException: + _rterr(path=self.__root, bad_prop=(PROP_NAME, props[PROP_NAME])) + + if lin.lin_type not in self.__plugins: + _rterr(path=self.__root, lin=lin, bad_lin_type=lin.lin_type) + + # make sure PROP_MODEL was specified + if PROP_MODEL not in props: + _rterr(path=self.__root, lin=lin, missing_props=[PROP_MODEL]) + + model = props[PROP_MODEL] + if model not in model_values: + _rterr(path=self.__root, lin=lin, bad_prop=(PROP_MODEL, model)) + + if model == PV_MODEL_PUSH: + missing = self.__push_child_props - props_set + if missing: + _rterr(path=self.__root, lin=lin, missing_props=missing) + + if model == PV_MODEL_PULL: + missing = self.__pull_child_props - props_set + if missing: + _rterr(path=self.__root, lin=lin, missing_props=missing) + + @staticmethod + def set_path_transform( + props, path_transform, path=None, current_path=None, update=False + ): + """Given a new path_transform, update path properties.""" + + if update: + assert ( + set(props) & temporal_props + ), "no temporal properties are set: {0}".format(props) + else: + assert not ( + set(props) & temporal_props + ), "temporal properties already set: {0}".format(props) + + # Either 'path' or 'current_path' must be specified. 
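
# A rough stand-alone sketch of the path-transform idea used by
# set_path_transform() and __set_current_path(): translating between the
# stored image path and the path the image is currently accessed at (for
# example under an alternate root). The real helpers are
# path_transform_apply()/path_transform_revert(), which are not shown in
# this hunk; a transform is assumed here to be a simple
# (original_prefix, current_prefix) pair, which is only an approximation.
def apply_transform(path, transform):
    orig_prefix, cur_prefix = transform
    if path.startswith(orig_prefix):
        return cur_prefix + path[len(orig_prefix):]
    return path

def revert_transform(path, transform):
    orig_prefix, cur_prefix = transform
    if path.startswith(cur_prefix):
        return orig_prefix + path[len(cur_prefix):]
    return path

transform = ("/", "/a/")   # image normally at /, accessed under /a
print(apply_transform("/zones/z1/root", transform))     # /a/zones/z1/root
print(revert_transform("/a/zones/z1/root", transform))  # /zones/z1/root
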
+ assert path is None or current_path is None + assert path is not None or current_path is not None + + if path is not None: + current_path = path_transform_apply(path, path_transform) + + elif current_path is not None: + path = path_transform_revert(current_path, path_transform) + + props[PROP_PATH] = path + props[PROP_CURRENT_PATH] = current_path + props[PROP_PATH_TRANSFORM] = path_transform + parent_path = props.get(PROP_PARENT_PATH) + if not parent_path: + return + + if not path_transform_applicable(parent_path, path_transform): + props[PROP_CURRENT_PARENT_PATH] = parent_path + return + + props[PROP_CURRENT_PARENT_PATH] = path_transform_apply( + parent_path, path_transform + ) + + def __set_current_path(self, props, update=False): + """Given a set of linked image properties, the image paths + stored within those properties may not match the actual image + paths if we're executing within an alternate root environment. + To deal with this situation we create temporal in-memory + properties that represent the current path to the image, and a + transform that allows us to translate between the current path + and the original path.""" + + current_path = self.__root + path_transform = compute_path_transform(props[PROP_PATH], current_path) + + self.set_path_transform( + props, path_transform, current_path=current_path, update=update + ) + + def __guess_path_transform(self, ignore_errors=False): + """If we're initializing parent linked image properties for + the first time (or if those properties somehow got deleted) + then we need to know if the parent image that we're currently + operating on is located within an alternate root. One way to + do this is to ask our linked image plugins if they can + determine this (the zones linked image plugin usually can + if the image is a global zone).""" + + # ask each plugin if we're operating in an alternate root + p_transforms = [] + for plugin, lip in six.iteritems(self.__plugins): + p_transform = lip.guess_path_transform(ignore_errors=ignore_errors) + if p_transform is not PATH_TRANSFORM_NONE: + p_transforms.append((plugin, p_transform)) + + if not p_transforms: + # no transform suggested by plugins + return PATH_TRANSFORM_NONE + + # check for conflicting transforms + transforms = list( + set( + [ + p_transform + # Unused variable; pylint: disable=W0612 + for pname, p_transform in p_transforms + # pylint: enable=W0612 + ] + ) + ) + + if len(transforms) == 1: + # we have a transform from our plugins + return transforms[0] + + # we have conflicting transforms, time to die + _rterr(li=self, multiple_transforms=p_transforms) + + def __fabricate_parent_props(self, ignore_errors=False): + """Fabricate the minimum set of properties required for a + parent image.""" + + # ask our plugins if we're operating with alternate image paths + path_transform = self.__guess_path_transform( + ignore_errors=ignore_errors + ) + + props = dict() + self.set_path_transform(props, path_transform, current_path=self.__root) + return props + + def __load_ondisk_props(self, tmp=True): + """Load linked image properties from disk and return them to + the caller. We sanity check the properties, but we don't + update any internal linked image state. + + 'tmp' determines if we should read/write to the official + linked image metadata files, or if we should access temporary + versions (which have "." 
appended to them.""" + + path = self.__path_prop + path_tmp = "{0}.{1:d}".format( + self.__path_prop, global_settings.client_runid + ) + + # read the linked image properties from disk + if tmp and path_exists(path_tmp): + path = path_tmp + props = load_data(path) + elif path_exists(path): + props = load_data(path) + else: + return None - errs = [] + # make sure there are no saved temporal properties + assert not set(props) & temporal_props - # check each property the user specified. - for k, v in six.iteritems(props): + if PROP_NAME in props: + # convert PROP_NAME into a linked image name obj + name = props[PROP_NAME] + try: + lin = LinkedImageName(name) + props[PROP_NAME] = lin + except apx.LinkedImageException: + _rterr(path=self.__root, bad_prop=(PROP_NAME, name)) + + # sanity check our properties + self.__verify_props(props) + return props + + def __load_ondisk_pfacets(self, tmp=True): + """Load linked image inherited facets from disk. + Don't update any internal state. + + 'tmp' determines if we should read/write to the official + linked image metadata files, or if we should access temporary + versions (which have "." appended to them.""" + + pfacets = misc.EmptyDict + path = "{0}.{1:d}".format( + self.__path_pfacets, global_settings.client_runid + ) + if tmp and path_exists(path): + pfacets = load_data(path) + else: + path = self.__path_pfacets + pfacets = load_data(path, missing_ok=True) + + if pfacets is None: + return None + + rv = pkg.facet.Facets() + for k, v in six.iteritems(pfacets): + # W0212 Access to a protected member + # pylint: disable=W0212 + rv._set_inherited(k, v) + return rv + + def __load_ondisk_ppkgs(self, tmp=True): + """Load linked image parent packages from disk. + Don't update any internal state. + + 'tmp' determines if we should read/write to the official + linked image metadata files, or if we should access temporary + versions (which have "." appended to them.""" + + fmri_strs = None + path = "{0}.{1:d}".format( + self.__path_ppkgs, global_settings.client_runid + ) + if tmp and path_exists(path): + fmri_strs = load_data(path) + else: + path = self.__path_ppkgs + fmri_strs = load_data(path, missing_ok=True) - # did the user specify an allowable property? - if k not in validate_props: - errs.append(apx.LinkedImageException( - attach_bad_prop=k)) - continue + if fmri_strs is None: + return None - # did the user specify a valid property value? - if not validate_props[k](v): - errs.append(apx.LinkedImageException( - attach_bad_prop_value=(k, v))) - continue + return frozenset([pkg.fmri.PkgFmri(str(s)) for s in fmri_strs]) - # is this property valid for this type of image? - if k not in allowed_props: - errs.append(apx.LinkedImageException( - attach_bad_prop=k)) - continue + def __load_ondisk_ppubs(self, tmp=True): + """Load linked image parent publishers from disk. + Don't update any internal state. - if len(errs) == 1: - raise errs[0] - if errs: - raise apx.LinkedImageException(bundle=errs) + 'tmp' determines if we should read/write to the official + linked image metadata files, or if we should access temporary + versions (which have "." 
appended to them.""" - def __init_pimg(self, path): - """Initialize an Image object which can be used to access a - parent image.""" + ppubs = None + path = "{0}.{1:d}".format( + self.__path_ppubs, global_settings.client_runid + ) + if tmp and path_exists(path): + ppubs = load_data(path) + else: + path = self.__path_ppubs + ppubs = load_data(path, missing_ok=True) - try: - os.stat(path) - except OSError: - raise apx.LinkedImageException(parent_bad_path=path) + return ppubs - try: - pimg = self.__img.alloc( - root=path, - user_provided_dir=True, - cmdpath=self.__img.cmdpath) - except apx.ImageNotFoundException: - raise apx.LinkedImageException(parent_bad_img=path) - - return pimg - - def nothingtodo(self): - """If our in-memory linked image state matches the on-disk - linked image state then there's nothing to do. If the state - differs then there is stuff to do since the new state needs - to be saved to disk.""" - - # check if we're not a linked image. - if not self.isparent() and not self.ischild(): - # if any linked image metadata files exist they need - # to be deleted. - paths = [ - self.__path_pfacets, - self.__path_ppkgs, - self.__path_ppubs, - self.__path_prop, - ] - for path in paths: - if path_exists(path): - return False - return True - - # compare in-memory and on-disk properties - li_ondisk_props = self.__load_ondisk_props(tmp=False) - if li_ondisk_props is None: - li_ondisk_props = dict() - li_inmemory_props = rm_dict_ent(self.__props, - temporal_props) - if li_ondisk_props != li_inmemory_props: - return False - - # linked image metadata files with inherited data - paths = [ - self.__path_pfacets, - self.__path_ppkgs, - self.__path_ppubs, - ] + def __load(self, tmp=True): + """Load linked image properties and constraints from disk. + Update the linked image internal state with the loaded data.""" - # check if we're just a parent image. - if not self.ischild(): - # parent images only have properties. if any linked - # image metadata files that contain inherited - # information exist they need to be deleted. - for path in paths: - if path_exists(path): - return False - return True - - # if we're missing any metadata files then there's work todo - for path in paths: - if not path_exists(path): - return False - - # compare in-memory and on-disk inherited facets - li_ondisk_pfacets = self.__load_ondisk_pfacets(tmp=False) - if self.__pfacets != li_ondisk_pfacets: - return False - - # compare in-memory and on-disk parent packages - li_ondisk_ppkgs = self.__load_ondisk_ppkgs(tmp=False) - if self.__ppkgs != li_ondisk_ppkgs: - return False - - # compare in-memory and on-disk parent publishers - li_ondisk_ppubs = self.__load_ondisk_ppubs(tmp=False) - if self.__ppubs != li_ondisk_ppubs: - return False + # + # Normally, if we're a parent image we'll have linked image + # properties stored on disk. So load those now. + # + # If no properties are loaded, we may still be a parent image + # that is just missing it's metadata. (oops.) We attempt to + # detect this situation by invoking __isparent(), which will + # ask each child if there are any children. This is a best + # effort attempt, so when we do this we ignore any plugin + # runtime errors since we really want Image object + # initialization to succeed. 
If we don't have any linked + # image metadata, and we're having runtime errors querying for + # children, then we'll allow initialization here, but any + # subsequent operation that tries to access children will fail + # and the caller will have to specify that they want to ignore + # all children to allow the operation to succeed. + # + props = self.__load_ondisk_props(tmp=tmp) + if not props and not self.__isparent(ignore_errors=True): + # we're not linked + return + + if not props: + # + # Oops. We're a parent image with no properties + # stored on disk. Rather than throwing an exception + # try to fabricate up some props with reasonably + # guessed values which the user can subsequently + # change and/or fix. + # + props = self.__fabricate_parent_props(ignore_errors=True) + else: + self.__set_current_path(props) + + self.__update_props(props) + + if not self.ischild(): + return + + # load parent packages. if parent package data is missing just + # continue along and hope for the best. + ppkgs = self.__load_ondisk_ppkgs(tmp=tmp) + if ppkgs is not None: + self.__ppkgs = ppkgs + + # load inherited facets. if inherited facet data is missing + # just continue along and hope for the best. + pfacets = self.__load_ondisk_pfacets(tmp=tmp) + if pfacets is not None: + self.__pfacets = pfacets + + # load parent publisher data. if publisher data is missing + # continue along and we'll just skip the publisher checks, + # it's better than failing and preventing any image updates. + self.__ppubs = self.__load_ondisk_ppubs(tmp=tmp) + + @staticmethod + def __validate_prop_recurse(v): + """Verify property value for PROP_RECURSE.""" + if v in [True, False]: + return True + if type(v) == str and v.lower() in ["true", "false"]: + return True + return False - return True + def __validate_attach_props(self, model, props): + """Validate user supplied linked image attach properties. + Don't update any internal state.""" - def pubcheck(self): - """If we're a child image's, verify that the parent image - publisher configuration is a subset of the child images - publisher configuration. This means that all publishers - configured within the parent image must also be configured - within the child image with the same: - - - publisher rank - - sticky and disabled settings - - The child image may have additional publishers configured but - they must all be lower ranked than the parent's publishers. - """ - - # if we're not a child image then bail - if not self.ischild(): - return - - # if we're using the sysrepo then don't bother - if self.__img.cfg.get_policy("use-system-repo"): - return - - pubs = get_pubs(self.__img) - ppubs = self.__ppubs - - if ppubs is None: - # parent publisher data is missing, press on and hope - # for the best. 
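# Illustration only, not part of this change: the ordering rule that
# pubcheck() enforces, reduced to plain tuples.  The real check compares
# the publisher objects returned by get_pubs(); the names and tuple
# layout below are made up for the example.
def demo_pubcheck(child_pubs, parent_pubs):
    # the child needs at least as many publishers as the parent, and the
    # parent's publishers must match the child's highest-ranked ones
    # exactly (same name, sticky and disabled settings, same order).
    if len(child_pubs) < len(parent_pubs):
        return False
    return all(c == p for c, p in zip(child_pubs, parent_pubs))

# (name, sticky, disabled); extra, lower-ranked child publishers are fine
parent = [("solaris", True, False)]
child_ok = [("solaris", True, False), ("site", True, False)]
child_bad = [("site", True, False), ("solaris", True, False)]
assert demo_pubcheck(child_ok, parent)
assert not demo_pubcheck(child_bad, parent)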
- return - - # child image needs at least as many publishers as the parent - if len(pubs) < len(ppubs): - raise apx.PlanCreationException( - linked_pub_error=(pubs, ppubs)) - - # check rank, sticky, and disabled settings - for (p, pp) in zip(pubs, ppubs): - if p == pp: - continue - raise apx.PlanCreationException( - linked_pub_error=(pubs, ppubs)) - - def __syncmd_from_parent(self): - """Update linked image constraint, publisher data, and - state from our parent image.""" - - if not self.ischild(): - # we're not a child image, nothing to do - return - - if self.__props[PROP_MODEL] == PV_MODEL_PUSH: - # parent pushes data to us, nothing to do - return - - # initialize the parent image - if not self.__pimg: - path = self.parent_path() - self.__pimg = self.__init_pimg(path) - - # get metadata from our parent image - self.__ppubs = get_pubs(self.__pimg) - self.__ppkgs = get_packages(self.__pimg) - self.__pfacets = get_inheritable_facets(self.__pimg) - - def syncmd_from_parent(self, catch_exception=False): - """Update linked image constraint, publisher data, and state - from our parent image. If catch_exception is true catch any - linked image exceptions and pack them up in a linked image - return value tuple.""" + # make sure that only attach time options have been + # specified, and that they have allowed values. + validate_props = {PROP_RECURSE: self.__validate_prop_recurse} - try: - self.__syncmd_from_parent() - except apx.LinkedImageException as e: - if not catch_exception: - raise e - return LI_RVTuple(e.lix_exitrv, e, None) - return + if model == PV_MODEL_PUSH: + allowed_props = self.__push_child_props + else: + assert model == PV_MODEL_PULL + allowed_props = self.__pull_child_props + + errs = [] + + # check each property the user specified. + for k, v in six.iteritems(props): + # did the user specify an allowable property? + if k not in validate_props: + errs.append(apx.LinkedImageException(attach_bad_prop=k)) + continue + + # did the user specify a valid property value? + if not validate_props[k](v): + errs.append( + apx.LinkedImageException(attach_bad_prop_value=(k, v)) + ) + continue + + # is this property valid for this type of image? + if k not in allowed_props: + errs.append(apx.LinkedImageException(attach_bad_prop=k)) + continue + + if len(errs) == 1: + raise errs[0] + if errs: + raise apx.LinkedImageException(bundle=errs) + + def __init_pimg(self, path): + """Initialize an Image object which can be used to access a + parent image.""" - def syncmd(self): - """Write in-memory linked image state to disk.""" + try: + os.stat(path) + except OSError: + raise apx.LinkedImageException(parent_bad_path=path) - # create a list of metadata file paths - paths = [ - self.__path_pfacets, - self.__path_ppkgs, - self.__path_ppubs, - self.__path_prop, - ] + try: + pimg = self.__img.alloc( + root=path, user_provided_dir=True, cmdpath=self.__img.cmdpath + ) + except apx.ImageNotFoundException: + raise apx.LinkedImageException(parent_bad_img=path) + + return pimg + + def nothingtodo(self): + """If our in-memory linked image state matches the on-disk + linked image state then there's nothing to do. If the state + differs then there is stuff to do since the new state needs + to be saved to disk.""" + + # check if we're not a linked image. + if not self.isparent() and not self.ischild(): + # if any linked image metadata files exist they need + # to be deleted. 
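# Illustration only, not part of this change: the collect-then-bundle
# error pattern used by __validate_attach_props() above -- one exception
# is gathered per bad property, then either the single error or a bundle
# of them is raised.  DemoError stands in for apx.LinkedImageException.
class DemoError(Exception):
    def __init__(self, msg=None, bundle=None):
        super().__init__(msg)
        self.bundle = bundle or []

def demo_validate(props, allowed):
    errs = [DemoError("bad property: " + k) for k in props if k not in allowed]
    if len(errs) == 1:
        raise errs[0]
    if errs:
        raise DemoError(bundle=errs)

demo_validate({"recurse": True}, allowed={"recurse"})  # passes silently
try:
    demo_validate({"recurse": True, "bogus": 1, "junk": 2}, allowed={"recurse"})
except DemoError as e:
    assert len(e.bundle) == 2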
+ paths = [ + self.__path_pfacets, + self.__path_ppkgs, + self.__path_ppubs, + self.__path_prop, + ] + for path in paths: + if path_exists(path): + return False + return True + + # compare in-memory and on-disk properties + li_ondisk_props = self.__load_ondisk_props(tmp=False) + if li_ondisk_props is None: + li_ondisk_props = dict() + li_inmemory_props = rm_dict_ent(self.__props, temporal_props) + if li_ondisk_props != li_inmemory_props: + return False + + # linked image metadata files with inherited data + paths = [ + self.__path_pfacets, + self.__path_ppkgs, + self.__path_ppubs, + ] - # cleanup any temporary files - for path in paths: - path = "{0}.{1:d}".format(path, - global_settings.client_runid) - path_unlink(path, noent_ok=True) + # check if we're just a parent image. + if not self.ischild(): + # parent images only have properties. if any linked + # image metadata files that contain inherited + # information exist they need to be deleted. + for path in paths: + if path_exists(path): + return False + return True + + # if we're missing any metadata files then there's work todo + for path in paths: + if not path_exists(path): + return False - if not self.ischild() and not self.isparent(): - # we're no longer linked; delete metadata - for path in paths: - path_unlink(path, noent_ok=True) - return + # compare in-memory and on-disk inherited facets + li_ondisk_pfacets = self.__load_ondisk_pfacets(tmp=False) + if self.__pfacets != li_ondisk_pfacets: + return False - # save our properties, but first remove any temporal properties - props = rm_dict_ent(self.__props, temporal_props) - save_data(self.__path_prop, props) + # compare in-memory and on-disk parent packages + li_ondisk_ppkgs = self.__load_ondisk_ppkgs(tmp=False) + if self.__ppkgs != li_ondisk_ppkgs: + return False - if not self.ischild(): - # if we're not a child we don't have parent data - path_unlink(self.__path_pfacets, noent_ok=True) - path_unlink(self.__path_ppkgs, noent_ok=True) - path_unlink(self.__path_ppubs, noent_ok=True) - return + # compare in-memory and on-disk parent publishers + li_ondisk_ppubs = self.__load_ondisk_ppubs(tmp=False) + if self.__ppubs != li_ondisk_ppubs: + return False - # we're a child so save our latest constraints - save_data(self.__path_pfacets, self.__pfacets) - save_data(self.__path_ppkgs, self.__ppkgs) - save_data(self.__path_ppubs, self.__ppubs) + return True - @property - def child_name(self): - """If the current image is a child image, this function - returns a linked image name object which represents the name - of the current image.""" + def pubcheck(self): + """If we're a child image's, verify that the parent image + publisher configuration is a subset of the child images + publisher configuration. This means that all publishers + configured within the parent image must also be configured + within the child image with the same: - if not self.ischild(): - raise self.__apx_not_child() - return self.__props[PROP_NAME] + - publisher rank + - sticky and disabled settings - def ischild(self): - """Indicates whether the current image is a child image.""" + The child image may have additional publishers configured but + they must all be lower ranked than the parent's publishers. + """ - return PROP_NAME in self.__props + # if we're not a child image then bail + if not self.ischild(): + return - def __isparent(self, ignore_errors=False): - """Indicates whether the current image is a parent image. 
+ # if we're using the sysrepo then don't bother + if self.__img.cfg.get_policy("use-system-repo"): + return - 'ignore_plugin_errors' ignore plugin runtime errors when - trying to determine if we're a parent image. - """ + pubs = get_pubs(self.__img) + ppubs = self.__ppubs - return len(self.__list_children( - ignore_errors=ignore_errors)) > 0 + if ppubs is None: + # parent publisher data is missing, press on and hope + # for the best. + return - def isparent(self, li_ignore=None): - """Indicates whether the current image is a parent image.""" + # child image needs at least as many publishers as the parent + if len(pubs) < len(ppubs): + raise apx.PlanCreationException(linked_pub_error=(pubs, ppubs)) - return len(self.__list_children(li_ignore=li_ignore)) > 0 + # check rank, sticky, and disabled settings + for p, pp in zip(pubs, ppubs): + if p == pp: + continue + raise apx.PlanCreationException(linked_pub_error=(pubs, ppubs)) - def islinked(self): - """Indicates wether the current image is already linked.""" - return self.ischild() or self.isparent() + def __syncmd_from_parent(self): + """Update linked image constraint, publisher data, and + state from our parent image.""" - def get_path_transform(self): - """Return the current path transform property.""" + if not self.ischild(): + # we're not a child image, nothing to do + return - return self.__props.get( - PROP_PATH_TRANSFORM, PATH_TRANSFORM_NONE) + if self.__props[PROP_MODEL] == PV_MODEL_PUSH: + # parent pushes data to us, nothing to do + return - def inaltroot(self): - """Check if we're accessing a linked image at an alternate - location/path.""" + # initialize the parent image + if not self.__pimg: + path = self.parent_path() + self.__pimg = self.__init_pimg(path) - return self.get_path_transform() != PATH_TRANSFORM_NONE + # get metadata from our parent image + self.__ppubs = get_pubs(self.__pimg) + self.__ppkgs = get_packages(self.__pimg) + self.__pfacets = get_inheritable_facets(self.__pimg) - def path(self): - """Report our current image path.""" + def syncmd_from_parent(self, catch_exception=False): + """Update linked image constraint, publisher data, and state + from our parent image. 
If catch_exception is true catch any + linked image exceptions and pack them up in a linked image + return value tuple.""" - assert self.islinked() - return self.__props[PROP_PATH] + try: + self.__syncmd_from_parent() + except apx.LinkedImageException as e: + if not catch_exception: + raise e + return LI_RVTuple(e.lix_exitrv, e, None) + return + + def syncmd(self): + """Write in-memory linked image state to disk.""" + + # create a list of metadata file paths + paths = [ + self.__path_pfacets, + self.__path_ppkgs, + self.__path_ppubs, + self.__path_prop, + ] - def current_path(self): - """Report our current image path.""" + # cleanup any temporary files + for path in paths: + path = "{0}.{1:d}".format(path, global_settings.client_runid) + path_unlink(path, noent_ok=True) + + if not self.ischild() and not self.isparent(): + # we're no longer linked; delete metadata + for path in paths: + path_unlink(path, noent_ok=True) + return + + # save our properties, but first remove any temporal properties + props = rm_dict_ent(self.__props, temporal_props) + save_data(self.__path_prop, props) + + if not self.ischild(): + # if we're not a child we don't have parent data + path_unlink(self.__path_pfacets, noent_ok=True) + path_unlink(self.__path_ppkgs, noent_ok=True) + path_unlink(self.__path_ppubs, noent_ok=True) + return + + # we're a child so save our latest constraints + save_data(self.__path_pfacets, self.__pfacets) + save_data(self.__path_ppkgs, self.__ppkgs) + save_data(self.__path_ppubs, self.__ppubs) + + @property + def child_name(self): + """If the current image is a child image, this function + returns a linked image name object which represents the name + of the current image.""" + + if not self.ischild(): + raise self.__apx_not_child() + return self.__props[PROP_NAME] + + def ischild(self): + """Indicates whether the current image is a child image.""" + + return PROP_NAME in self.__props + + def __isparent(self, ignore_errors=False): + """Indicates whether the current image is a parent image. + + 'ignore_plugin_errors' ignore plugin runtime errors when + trying to determine if we're a parent image. + """ - assert self.islinked() - return self.__props[PROP_CURRENT_PATH] + return len(self.__list_children(ignore_errors=ignore_errors)) > 0 - def parent_path(self): - """If we know where our parent should be, report it's expected - location.""" + def isparent(self, li_ignore=None): + """Indicates whether the current image is a parent image.""" - if PROP_PARENT_PATH not in self.__props: - return None + return len(self.__list_children(li_ignore=li_ignore)) > 0 - path = self.__props[PROP_CURRENT_PARENT_PATH] - assert path[-1] == "/" - return path + def islinked(self): + """Indicates wether the current image is already linked.""" + return self.ischild() or self.isparent() - def child_props(self, lin=None): - """Return a dictionary which represents the linked image - properties associated with a linked image. + def get_path_transform(self): + """Return the current path transform property.""" - 'lin' is the name of the child image. If lin is None then - the current image is assumed to be a linked image and it's - properties are returned. + return self.__props.get(PROP_PATH_TRANSFORM, PATH_TRANSFORM_NONE) - Always returns a copy of the properties in case the caller - tries to update them.""" + def inaltroot(self): + """Check if we're accessing a linked image at an alternate + location/path.""" - if lin is None: - # If we're not linked we'll return an empty - # dictionary. That's ok. 
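# Illustration only, not part of this change: why nothingtodo() and
# syncmd() strip the temporal (runtime-only) properties before comparing
# against, or writing, the on-disk metadata.  demo_rm() is a stand-in for
# the rm_dict_ent() helper defined elsewhere in this module, and the
# dictionary keys are made up; the real code uses the PROP_* constants.
def demo_rm(props, keys):
    return {k: v for k, v in props.items() if k not in keys}

temporal = {"current-path", "current-parent-path", "path-transform"}
in_memory = {
    "name": "zone:z1",
    "path": "/zones/z1/root/",
    "current-path": "/altroot/zones/z1/root/",
}
on_disk = {"name": "zone:z1", "path": "/zones/z1/root/"}
# once the runtime-only entries are stripped the two views match, so
# there is nothing that needs to be written back to disk.
assert demo_rm(in_memory, temporal) == on_disk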
- return self.__props.copy() + return self.get_path_transform() != PATH_TRANSFORM_NONE - # make sure the specified child exists - self.__verify_child_name(lin, raise_except=True) + def path(self): + """Report our current image path.""" - # make a copy of the props in case they are updated - lip = self.__plugins[lin.lin_type] - props = lip.get_child_props(lin).copy() + assert self.islinked() + return self.__props[PROP_PATH] - # add temporal properties - self.set_path_transform(props, self.get_path_transform(), - path=props[PROP_PATH]) - return props + def current_path(self): + """Report our current image path.""" - def __apx_not_child(self): - """Raise an exception because the current image is not a child - image.""" + assert self.islinked() + return self.__props[PROP_CURRENT_PATH] - return apx.LinkedImageException(self_not_child=self.__root) + def parent_path(self): + """If we know where our parent should be, report it's expected + location.""" - def __verify_child_name(self, lin, raise_except=False): - """Check if a specific child image exists.""" + if PROP_PARENT_PATH not in self.__props: + return None - assert type(lin) == LinkedImageName, \ - "{0} == LinkedImageName".format(type(lin)) + path = self.__props[PROP_CURRENT_PARENT_PATH] + assert path[-1] == "/" + return path - for i in self.__list_children(): - if i[0] == lin: - return True + def child_props(self, lin=None): + """Return a dictionary which represents the linked image + properties associated with a linked image. - if raise_except: - raise apx.LinkedImageException(child_unknown=lin) - return False + 'lin' is the name of the child image. If lin is None then + the current image is assumed to be a linked image and it's + properties are returned. - def verify_names(self, lin_list): - """Given a list of linked image name objects, make sure all - the children exist.""" + Always returns a copy of the properties in case the caller + tries to update them.""" - assert isinstance(lin_list, list), \ - "type(lin_list) == {0}, str(lin_list) == {1}".format( - type(lin_list), str(lin_list)) + if lin is None: + # If we're not linked we'll return an empty + # dictionary. That's ok. + return self.__props.copy() - for lin in lin_list: - self.__verify_child_name(lin, raise_except=True) + # make sure the specified child exists + self.__verify_child_name(lin, raise_except=True) - def inherited_facets(self): - """Facets inherited from our parent image.""" - return self.__pfacets + # make a copy of the props in case they are updated + lip = self.__plugins[lin.lin_type] + props = lip.get_child_props(lin).copy() - def parent_fmris(self): - """A set of the fmris installed in our parent image.""" + # add temporal properties + self.set_path_transform( + props, self.get_path_transform(), path=props[PROP_PATH] + ) + return props - if not self.ischild(): - # We return None since frozenset() would indicate - # that there are no packages installed in the parent - # image. - return None + def __apx_not_child(self): + """Raise an exception because the current image is not a child + image.""" - return self.__ppkgs + return apx.LinkedImageException(self_not_child=self.__root) - def parse_name(self, name, allow_unknown=False): - """Given a string representing a linked image child name, - returns linked image name object representing the same name. 
+ def __verify_child_name(self, lin, raise_except=False): + """Check if a specific child image exists.""" - 'allow_unknown' indicates whether the name must represent - actual children or simply be syntactically correct.""" + assert type(lin) == LinkedImageName, "{0} == LinkedImageName".format( + type(lin) + ) - assert type(name) == str + for i in self.__list_children(): + if i[0] == lin: + return True - lin = LinkedImageName(name) - if not allow_unknown: - self.__verify_child_name(lin, raise_except=True) - return lin - - def __list_children(self, li_ignore=None, ignore_errors=False): - """Returns a list of linked child images associated with the - current image. - - 'li_ignore' see list_related() for a description. - - The returned value is a list of tuples where each tuple - contains (
<li name>, <path>
  • ).""" - - if li_ignore == []: - # ignore all children - return [] - - li_children = [] - for p in pkg.client.linkedimage.p_types: - for lin, path in self.__plugins[p].get_child_list( - ignore_errors=ignore_errors): - assert lin.lin_type == p - path = path_transform_apply(path, - self.get_path_transform()) - li_children.append([lin, path]) - - # sort by linked image name - li_children = sorted(li_children, key=operator.itemgetter(0)) - - if li_ignore is None: - # don't ignore any children - return li_children - - li_all = set([lin for lin, path in li_children]) - errs = [ - apx.LinkedImageException(child_unknown=lin) - for lin in (set(li_ignore) - li_all) - ] - if len(errs) == 1: - raise errs[0] - if errs: - raise apx.LinkedImageException(bundle=errs) - - return [ - (lin, path) - for lin, path in li_children - if lin not in li_ignore - ] + if raise_except: + raise apx.LinkedImageException(child_unknown=lin) + return False - def list_related(self, li_ignore=None): - """Returns a list of linked images associated with the - current image. This includes both child and parent images. + def verify_names(self, lin_list): + """Given a list of linked image name objects, make sure all + the children exist.""" - 'li_ignore' is either None or a list. If it's None (the - default), all children will be listed. If it's an empty list - no children will be listed. Otherwise, any children listed - in li_ignore will be ommited from the results. + assert isinstance( + lin_list, list + ), "type(lin_list) == {0}, str(lin_list) == {1}".format( + type(lin_list), str(lin_list) + ) - The returned value is a list of tuples where each tuple - contains (
<li name>, <relationship>, <path>
  • ).""" + for lin in lin_list: + self.__verify_child_name(lin, raise_except=True) - li_children = self.__list_children(li_ignore=li_ignore) - li_list = [ - (lin, REL_CHILD, path) - for lin, path in li_children - ] + def inherited_facets(self): + """Facets inherited from our parent image.""" + return self.__pfacets - if not li_list and not self.ischild(): - # we're not linked - return [] + def parent_fmris(self): + """A set of the fmris installed in our parent image.""" - # we're linked so append ourself to the list - lin = PV_NAME_NONE - if self.ischild(): - lin = self.child_name + if not self.ischild(): + # We return None since frozenset() would indicate + # that there are no packages installed in the parent + # image. + return None - path = self.current_path() - li_self = (lin, REL_SELF, path) - li_list.append(li_self) + return self.__ppkgs - # if we have a path to our parent then append that as well. - path = self.parent_path() - if path is not None: - li_parent = (PV_NAME_NONE, REL_PARENT, path) - li_list.append(li_parent) + def parse_name(self, name, allow_unknown=False): + """Given a string representing a linked image child name, + returns linked image name object representing the same name. - # sort by linked image name - li_list = sorted(li_list, key=operator.itemgetter(0)) + 'allow_unknown' indicates whether the name must represent + actual children or simply be syntactically correct.""" - return li_list + assert type(name) == str - def attach_parent(self, lin, path, props, allow_relink=False, - force=False): - """We only update in-memory state; nothing is written to - disk, to sync linked image state to disk call syncmd.""" + lin = LinkedImageName(name) + if not allow_unknown: + self.__verify_child_name(lin, raise_except=True) + return lin - assert type(lin) == LinkedImageName - assert type(path) == str - assert props is None or type(props) == dict, \ - "type(props) == {0}".format(type(props)) - if props is None: - props = dict() + def __list_children(self, li_ignore=None, ignore_errors=False): + """Returns a list of linked child images associated with the + current image. - lip = self.__plugins[lin.lin_type] + 'li_ignore' see list_related() for a description. - if self.ischild() and not allow_relink: - raise apx.LinkedImageException(self_linked=self.__root) + The returned value is a list of tuples where each tuple + contains (
<li name>, <path>
  • ).""" - if not lip.support_attach and not force: - raise apx.LinkedImageException( - attach_parent_notsup=lin.lin_type) + if li_ignore == []: + # ignore all children + return [] - # Path must be an absolute path. - if not os.path.isabs(path): - raise apx.LinkedImageException(parent_bad_notabs=path) + li_children = [] + for p in pkg.client.linkedimage.p_types: + for lin, path in self.__plugins[p].get_child_list( + ignore_errors=ignore_errors + ): + assert lin.lin_type == p + path = path_transform_apply(path, self.get_path_transform()) + li_children.append([lin, path]) - # we don't bother to cleanup the path to the parent image here - # because when we allocate an Image object for the parent - # image, it will do that work for us. - pimg = self.__init_pimg(path) + # sort by linked image name + li_children = sorted(li_children, key=operator.itemgetter(0)) - # get the cleaned up parent image path. - path = pimg.root + if li_ignore is None: + # don't ignore any children + return li_children - # Make sure our parent image is at it's default path. (We - # don't allow attaching new images if an image is located at - # an alternate path.) - if pimg.linked.inaltroot(): - raise apx.LinkedImageException(attach_with_curpath=( - pimg.linked.path(), pimg.current_path())) + li_all = set([lin for lin, path in li_children]) + errs = [ + apx.LinkedImageException(child_unknown=lin) + for lin in (set(li_ignore) - li_all) + ] + if len(errs) == 1: + raise errs[0] + if errs: + raise apx.LinkedImageException(bundle=errs) - self.__validate_attach_props(PV_MODEL_PULL, props) - self.__validate_attach_img_paths(path, self.__root) + return [ + (lin, path) for lin, path in li_children if lin not in li_ignore + ] - # make a copy of the properties and update them - props = props.copy() - props[PROP_NAME] = lin - props[PROP_MODEL] = PV_MODEL_PULL - - # If we're in an alternate root, the parent must also be within - # that alternate root. - path_transform = self.get_path_transform() - if not path_transform_applied(path, path_transform): - raise apx.LinkedImageException( - parent_not_in_altroot=(path, path_transform[1])) - - # Set path related properties. We use self.__root in place of - # current_path() since we may not actually be linked yet. - props[PROP_PARENT_PATH] = path.rstrip(os.sep) + os.sep - self.set_path_transform(props, path_transform, - current_path=self.__root) - - for k, v in six.iteritems(lip.attach_props_def): - if k not in self.__pull_child_props: - # this prop doesn't apply to pull images - continue - if k not in props: - props[k] = v - - self.__update_props(props) - self.__pimg = pimg - - def detach_parent(self, force=False): - """We only update in memory state; nothing is written to - disk, to sync linked image state to disk call syncmd.""" - - lin = self.child_name - lip = self.__plugins[lin.lin_type] - if not force: - if self.__props[PROP_MODEL] == PV_MODEL_PUSH: - raise apx.LinkedImageException( - detach_from_parent=self.__root) - - if not lip.support_detach: - raise apx.LinkedImageException( - detach_parent_notsup=lin.lin_type) - - # Generate a new set of linked image properties. If we have - # no children then we don't need any more properties. - props = None - - # If we have children we'll need to keep some properties. - if self.isparent(): - strip = prop_values - \ - (self.__parent_props | temporal_props) - props = rm_dict_ent(self.__props, strip) - - # Update our linked image properties. 
- self.__update_props(props) - - def __insync(self): - """Determine if an image is in sync with its constraints.""" - - assert self.ischild() - - cat = self.__img.get_catalog(self.__img.IMG_CATALOG_INSTALLED) - excludes = self.__img.list_excludes() - - sync_fmris = [] - for fmri in cat.fmris(): - # get parent dependencies from the catalog - for f in itertools.chain.from_iterable( - a.attrlist("fmri") - for a in cat.get_entry_actions(fmri, - [pkg.catalog.Catalog.DEPENDENCY], - excludes=excludes) - if a.name == "depend" and - a.attrs["type"] == "parent" - ): - if f == pkg.actions.depend.DEPEND_SELF: - sync_fmris.append((fmri, fmri)) - else: - sync_fmris.append((fmri, - pkg.fmri.PkgFmri(f))) - - if not sync_fmris: - # No packages to sync - return True - - # create a dictionary of packages installed in the parent - ppkgs_dict = dict([ - (fmri.pkg_name, fmri) - for fmri in self.parent_fmris() - ]) - - for (pkg_fmri, fmri) in sync_fmris: - if fmri.pkg_name not in ppkgs_dict: - return False - # This intentionally mirrors the logic in - # __trim_nonmatching_parents1 in pkg_solver.py. - pf = ppkgs_dict[fmri.pkg_name] - if pf.version == fmri.version: - # parent dependency is satisfied, which applies - # to both DEPEND_SELF and other cases - continue - elif (pkg_fmri != fmri and - pf.version.is_successor(fmri.version, - pkg.version.CONSTRAINT_NONE)): - # *not* DEPEND_SELF; parent dependency is - # satisfied - continue - return False - return True + def list_related(self, li_ignore=None): + """Returns a list of linked images associated with the + current image. This includes both child and parent images. + + 'li_ignore' is either None or a list. If it's None (the + default), all children will be listed. If it's an empty list + no children will be listed. Otherwise, any children listed + in li_ignore will be ommited from the results. + + The returned value is a list of tuples where each tuple + contains (
<li name>, <relationship>, <path>
  • ).""" + + li_children = self.__list_children(li_ignore=li_ignore) + li_list = [(lin, REL_CHILD, path) for lin, path in li_children] + + if not li_list and not self.ischild(): + # we're not linked + return [] + + # we're linked so append ourself to the list + lin = PV_NAME_NONE + if self.ischild(): + lin = self.child_name + + path = self.current_path() + li_self = (lin, REL_SELF, path) + li_list.append(li_self) + + # if we have a path to our parent then append that as well. + path = self.parent_path() + if path is not None: + li_parent = (PV_NAME_NONE, REL_PARENT, path) + li_list.append(li_parent) + + # sort by linked image name + li_list = sorted(li_list, key=operator.itemgetter(0)) + + return li_list + + def attach_parent(self, lin, path, props, allow_relink=False, force=False): + """We only update in-memory state; nothing is written to + disk, to sync linked image state to disk call syncmd.""" + + assert type(lin) == LinkedImageName + assert type(path) == str + assert ( + props is None or type(props) == dict + ), "type(props) == {0}".format(type(props)) + if props is None: + props = dict() + + lip = self.__plugins[lin.lin_type] + + if self.ischild() and not allow_relink: + raise apx.LinkedImageException(self_linked=self.__root) + + if not lip.support_attach and not force: + raise apx.LinkedImageException(attach_parent_notsup=lin.lin_type) + + # Path must be an absolute path. + if not os.path.isabs(path): + raise apx.LinkedImageException(parent_bad_notabs=path) + + # we don't bother to cleanup the path to the parent image here + # because when we allocate an Image object for the parent + # image, it will do that work for us. + pimg = self.__init_pimg(path) + + # get the cleaned up parent image path. + path = pimg.root + + # Make sure our parent image is at it's default path. (We + # don't allow attaching new images if an image is located at + # an alternate path.) + if pimg.linked.inaltroot(): + raise apx.LinkedImageException( + attach_with_curpath=(pimg.linked.path(), pimg.current_path()) + ) + + self.__validate_attach_props(PV_MODEL_PULL, props) + self.__validate_attach_img_paths(path, self.__root) + + # make a copy of the properties and update them + props = props.copy() + props[PROP_NAME] = lin + props[PROP_MODEL] = PV_MODEL_PULL + + # If we're in an alternate root, the parent must also be within + # that alternate root. + path_transform = self.get_path_transform() + if not path_transform_applied(path, path_transform): + raise apx.LinkedImageException( + parent_not_in_altroot=(path, path_transform[1]) + ) + + # Set path related properties. We use self.__root in place of + # current_path() since we may not actually be linked yet. 
+ props[PROP_PARENT_PATH] = path.rstrip(os.sep) + os.sep + self.set_path_transform(props, path_transform, current_path=self.__root) + + for k, v in six.iteritems(lip.attach_props_def): + if k not in self.__pull_child_props: + # this prop doesn't apply to pull images + continue + if k not in props: + props[k] = v + + self.__update_props(props) + self.__pimg = pimg + + def detach_parent(self, force=False): + """We only update in memory state; nothing is written to + disk, to sync linked image state to disk call syncmd.""" + + lin = self.child_name + lip = self.__plugins[lin.lin_type] + if not force: + if self.__props[PROP_MODEL] == PV_MODEL_PUSH: + raise apx.LinkedImageException(detach_from_parent=self.__root) + + if not lip.support_detach: + raise apx.LinkedImageException( + detach_parent_notsup=lin.lin_type + ) + + # Generate a new set of linked image properties. If we have + # no children then we don't need any more properties. + props = None + + # If we have children we'll need to keep some properties. + if self.isparent(): + strip = prop_values - (self.__parent_props | temporal_props) + props = rm_dict_ent(self.__props, strip) + + # Update our linked image properties. + self.__update_props(props) + + def __insync(self): + """Determine if an image is in sync with its constraints.""" + + assert self.ischild() + + cat = self.__img.get_catalog(self.__img.IMG_CATALOG_INSTALLED) + excludes = self.__img.list_excludes() + + sync_fmris = [] + for fmri in cat.fmris(): + # get parent dependencies from the catalog + for f in itertools.chain.from_iterable( + a.attrlist("fmri") + for a in cat.get_entry_actions( + fmri, [pkg.catalog.Catalog.DEPENDENCY], excludes=excludes + ) + if a.name == "depend" and a.attrs["type"] == "parent" + ): + if f == pkg.actions.depend.DEPEND_SELF: + sync_fmris.append((fmri, fmri)) + else: + sync_fmris.append((fmri, pkg.fmri.PkgFmri(f))) - def audit_self(self, latest_md=True): - """If the current image is a child image, this function - audits the current image to see if it's in sync with its - parent.""" - - if not self.ischild(): - e = self.__apx_not_child() - return LI_RVTuple(pkgdefs.EXIT_OOPS, e, None) - - if not latest_md: - # we don't use the latest linked image metadata. - # instead return cached insync value which was - # computed using the initial linked image metadata - # that we loaded from disk. - if not self.__img_insync: - e = apx.LinkedImageException( - child_diverged=self.child_name) - return LI_RVTuple(pkgdefs.EXIT_DIVERGED, e, - None) - return LI_RVTuple(pkgdefs.EXIT_OK, None, None) - - if not self.__insync(): - e = apx.LinkedImageException( - child_diverged=self.child_name) - return LI_RVTuple(pkgdefs.EXIT_DIVERGED, e, None) - - return LI_RVTuple(pkgdefs.EXIT_OK, None, None) - - def insync(self, latest_md=True): - """A convenience wrapper for audit_self(). 
Note that we - consider non-child images as always in sync and ignore - any runtime errors.""" - - rv = self.image.linked.audit_self(latest_md=latest_md)[0] - if rv == pkgdefs.EXIT_DIVERGED: - return False - return True + if not sync_fmris: + # No packages to sync + return True - @staticmethod - def __rvdict2rv(rvdict, rv_map=None): - """Internal helper function that takes a dictionary returned - from an operations on multiple children and merges the results - into a single return code.""" - - _li_rvdict_check(rvdict) - if type(rv_map) != type(None): - assert type(rv_map) == list - for (rv_set, rv) in rv_map: - assert type(rv_set) == set - assert type(rv) == int - - if not rvdict: - return LI_RVTuple(pkgdefs.EXIT_OK, None, None) - - if not rv_map: - rv_map = [(set([pkgdefs.EXIT_OK]), pkgdefs.EXIT_OK)] - - p_dicts = [ - rvtuple.rvt_p_dict - for rvtuple in six.itervalues(rvdict) - if rvtuple.rvt_p_dict is not None - ] + # create a dictionary of packages installed in the parent + ppkgs_dict = dict( + [(fmri.pkg_name, fmri) for fmri in self.parent_fmris()] + ) - rv_mapped = set() - rv_seen = set([ - rvtuple.rvt_rv - for rvtuple in six.itervalues(rvdict) - ]) - for (rv_map_set, rv_map_rv) in rv_map: - if rv_seen == rv_map_set: - return LI_RVTuple(rv_map_rv, None, p_dicts) - # keep track of all the return values that are mapped - rv_mapped |= rv_map_set - - # the mappings better have included pkgdefs.EXIT_OK - assert pkgdefs.EXIT_OK in rv_mapped - - # if we had errors for unmapped return values, bundle them up - errs = [ - rvtuple.rvt_e - for rvtuple in six.itervalues(rvdict) - if rvtuple.rvt_e and rvtuple.rvt_rv not in rv_mapped - ] - if len(errs) == 1: - err = errs[0] - elif errs: - err = apx.LinkedImageException(bundle=errs) - else: - err = None + for pkg_fmri, fmri in sync_fmris: + if fmri.pkg_name not in ppkgs_dict: + return False + # This intentionally mirrors the logic in + # __trim_nonmatching_parents1 in pkg_solver.py. + pf = ppkgs_dict[fmri.pkg_name] + if pf.version == fmri.version: + # parent dependency is satisfied, which applies + # to both DEPEND_SELF and other cases + continue + elif pkg_fmri != fmri and pf.version.is_successor( + fmri.version, pkg.version.CONSTRAINT_NONE + ): + # *not* DEPEND_SELF; parent dependency is + # satisfied + continue + return False + return True + + def audit_self(self, latest_md=True): + """If the current image is a child image, this function + audits the current image to see if it's in sync with its + parent.""" + + if not self.ischild(): + e = self.__apx_not_child() + return LI_RVTuple(pkgdefs.EXIT_OOPS, e, None) + + if not latest_md: + # we don't use the latest linked image metadata. + # instead return cached insync value which was + # computed using the initial linked image metadata + # that we loaded from disk. + if not self.__img_insync: + e = apx.LinkedImageException(child_diverged=self.child_name) + return LI_RVTuple(pkgdefs.EXIT_DIVERGED, e, None) + return LI_RVTuple(pkgdefs.EXIT_OK, None, None) + + if not self.__insync(): + e = apx.LinkedImageException(child_diverged=self.child_name) + return LI_RVTuple(pkgdefs.EXIT_DIVERGED, e, None) + + return LI_RVTuple(pkgdefs.EXIT_OK, None, None) + + def insync(self, latest_md=True): + """A convenience wrapper for audit_self(). 
Note that we + consider non-child images as always in sync and ignore + any runtime errors.""" + + rv = self.image.linked.audit_self(latest_md=latest_md)[0] + if rv == pkgdefs.EXIT_DIVERGED: + return False + return True + + @staticmethod + def __rvdict2rv(rvdict, rv_map=None): + """Internal helper function that takes a dictionary returned + from an operations on multiple children and merges the results + into a single return code.""" - if len(rv_seen) == 1: - # we have one consistent return value - return LI_RVTuple(list(rv_seen)[0], err, p_dicts) + _li_rvdict_check(rvdict) + if type(rv_map) != type(None): + assert type(rv_map) == list + for rv_set, rv in rv_map: + assert type(rv_set) == set + assert type(rv) == int + + if not rvdict: + return LI_RVTuple(pkgdefs.EXIT_OK, None, None) + + if not rv_map: + rv_map = [(set([pkgdefs.EXIT_OK]), pkgdefs.EXIT_OK)] + + p_dicts = [ + rvtuple.rvt_p_dict + for rvtuple in six.itervalues(rvdict) + if rvtuple.rvt_p_dict is not None + ] - return LI_RVTuple(pkgdefs.EXIT_PARTIAL, err, p_dicts) + rv_mapped = set() + rv_seen = set([rvtuple.rvt_rv for rvtuple in six.itervalues(rvdict)]) + for rv_map_set, rv_map_rv in rv_map: + if rv_seen == rv_map_set: + return LI_RVTuple(rv_map_rv, None, p_dicts) + # keep track of all the return values that are mapped + rv_mapped |= rv_map_set - def audit_rvdict2rv(self, rvdict): - """Convenience function that takes a dictionary returned from - an operations on multiple children and merges the results into - a single return code.""" + # the mappings better have included pkgdefs.EXIT_OK + assert pkgdefs.EXIT_OK in rv_mapped - rv_map = [ - (set([pkgdefs.EXIT_OK]), pkgdefs.EXIT_OK), - (set([pkgdefs.EXIT_DIVERGED]), pkgdefs.EXIT_DIVERGED), - (set([pkgdefs.EXIT_OK, pkgdefs.EXIT_DIVERGED]), - pkgdefs.EXIT_DIVERGED), - ] - return self.__rvdict2rv(rvdict, rv_map) + # if we had errors for unmapped return values, bundle them up + errs = [ + rvtuple.rvt_e + for rvtuple in six.itervalues(rvdict) + if rvtuple.rvt_e and rvtuple.rvt_rv not in rv_mapped + ] + if len(errs) == 1: + err = errs[0] + elif errs: + err = apx.LinkedImageException(bundle=errs) + else: + err = None + + if len(rv_seen) == 1: + # we have one consistent return value + return LI_RVTuple(list(rv_seen)[0], err, p_dicts) + + return LI_RVTuple(pkgdefs.EXIT_PARTIAL, err, p_dicts) + + def audit_rvdict2rv(self, rvdict): + """Convenience function that takes a dictionary returned from + an operations on multiple children and merges the results into + a single return code.""" + + rv_map = [ + (set([pkgdefs.EXIT_OK]), pkgdefs.EXIT_OK), + (set([pkgdefs.EXIT_DIVERGED]), pkgdefs.EXIT_DIVERGED), + ( + set([pkgdefs.EXIT_OK, pkgdefs.EXIT_DIVERGED]), + pkgdefs.EXIT_DIVERGED, + ), + ] + return self.__rvdict2rv(rvdict, rv_map) - def sync_rvdict2rv(self, rvdict): - """Convenience function that takes a dictionary returned from - an operations on multiple children and merges the results into - a single return code.""" + def sync_rvdict2rv(self, rvdict): + """Convenience function that takes a dictionary returned from + an operations on multiple children and merges the results into + a single return code.""" - rv_map = [ - (set([pkgdefs.EXIT_OK]), pkgdefs.EXIT_OK), - (set([pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP]), pkgdefs.EXIT_OK), - (set([pkgdefs.EXIT_NOP]), pkgdefs.EXIT_NOP), - ] - return self.__rvdict2rv(rvdict, rv_map) + rv_map = [ + (set([pkgdefs.EXIT_OK]), pkgdefs.EXIT_OK), + (set([pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP]), pkgdefs.EXIT_OK), + (set([pkgdefs.EXIT_NOP]), pkgdefs.EXIT_NOP), + ] + return 
self.__rvdict2rv(rvdict, rv_map) - def detach_rvdict2rv(self, rvdict): - """Convenience function that takes a dictionary returned from - an operations on multiple children and merges the results into - a single return code.""" + def detach_rvdict2rv(self, rvdict): + """Convenience function that takes a dictionary returned from + an operations on multiple children and merges the results into + a single return code.""" - return self.__rvdict2rv(rvdict) + return self.__rvdict2rv(rvdict) - def __validate_child_attach(self, lin, path, props, - allow_relink=False): - """Sanity check the parameters associated with a child image - that we are trying to attach.""" + def __validate_child_attach(self, lin, path, props, allow_relink=False): + """Sanity check the parameters associated with a child image + that we are trying to attach.""" - assert type(lin) == LinkedImageName - assert type(props) == dict - assert type(path) == str + assert type(lin) == LinkedImageName + assert type(props) == dict + assert type(path) == str - # check the name to make sure it doesn't already exist - if self.__verify_child_name(lin) and not allow_relink: - raise apx.LinkedImageException(child_dup=lin) + # check the name to make sure it doesn't already exist + if self.__verify_child_name(lin) and not allow_relink: + raise apx.LinkedImageException(child_dup=lin) - self.__validate_attach_props(PV_MODEL_PUSH, props) + self.__validate_attach_props(PV_MODEL_PUSH, props) - # Path must be an absolute path. - if not os.path.isabs(path): - raise apx.LinkedImageException(child_path_notabs=path) + # Path must be an absolute path. + if not os.path.isabs(path): + raise apx.LinkedImageException(child_path_notabs=path) - # If we're in an alternate root, the child must also be within - # that alternate root - path_transform = self.__props[PROP_PATH_TRANSFORM] - if not path_transform_applied(path, path_transform): - raise apx.LinkedImageException( - child_not_in_altroot=(path, path_transform[1])) + # If we're in an alternate root, the child must also be within + # that alternate root + path_transform = self.__props[PROP_PATH_TRANSFORM] + if not path_transform_applied(path, path_transform): + raise apx.LinkedImageException( + child_not_in_altroot=(path, path_transform[1]) + ) - # path must be an image - try: - img_prefix = ar.ar_img_prefix(path) - except OSError as e: - raise apx.LinkedImageException(lin=lin, - child_op_failed=("find", path, e)) - if not img_prefix: - raise apx.LinkedImageException(child_bad_img=path) - - # Does the parent image (ourselves) reside in clonable BE? - # Unused variable 'be_uuid'; pylint: disable=W0612 - (be_name, be_uuid) = bootenv.BootEnv.get_be_name(self.__root) - # pylint: enable=W0612 - img_is_clonable = bool(be_name) - - # If the parent image is clonable then the new child image - # must be nested within the parents filesystem namespace. 
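# Illustration only, not part of this change: how an rv_map table, as
# passed to __rvdict2rv() above, collapses the per-child exit codes into
# a single result.  The integer values are arbitrary stand-ins for the
# pkgdefs.EXIT_* constants and the plain list stands in for the rvdict
# of LI_RVTuple results.
EXIT_OK, EXIT_DIVERGED, EXIT_PARTIAL = 0, 5, 99

def demo_merge(child_rvs, rv_map):
    seen = set(child_rvs)
    for rv_set, rv in rv_map:
        if seen == rv_set:
            return rv
    # one consistent unmapped value passes through; otherwise partial
    return seen.pop() if len(seen) == 1 else EXIT_PARTIAL

audit_map = [
    ({EXIT_OK}, EXIT_OK),
    ({EXIT_DIVERGED}, EXIT_DIVERGED),
    ({EXIT_OK, EXIT_DIVERGED}, EXIT_DIVERGED),
]
assert demo_merge([EXIT_OK, EXIT_OK], audit_map) == EXIT_OK
assert demo_merge([EXIT_OK, EXIT_DIVERGED], audit_map) == EXIT_DIVERGED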
- path = path.rstrip(os.sep) + os.sep - p_root = self.__root.rstrip(os.sep) + os.sep - if img_is_clonable and not path.startswith(p_root): - raise apx.LinkedImageException( - child_not_nested=(path, p_root)) - - # Child image should not already be linked - img_li_data_props = os.path.join(img_prefix, PATH_PROP) - try: - exists = ar.ar_exists(path, img_li_data_props) - except OSError as e: - # W0212 Access to a protected member - # pylint: disable=W0212 - raise apx._convert_error(e) - if exists and not allow_relink: - raise apx.LinkedImageException(img_linked=path) - - self.__validate_attach_img_paths(p_root, path) - - def __validate_attach_img_paths(self, ppath, cpath): - """Make sure there are no additional images in between the - parent and the child. For example, this prevents linking of - images if one of the images is nested within another unrelated - image. This is done by looking at all the parent directories - for both the parent and the child image until we reach a - common ancestor.""" - - # Make sure each path has a trailing '/'. - ppath = ppath.rstrip(os.sep) + os.sep - cpath = cpath.rstrip(os.sep) + os.sep - - # Make sure we're not linking to ourselves. - if ppath == cpath: - raise apx.LinkedImageException(link_to_self=ppath) - - # The parent image can't be nested nested within child. - if ppath.startswith(cpath): - raise apx.LinkedImageException( - parent_nested=(ppath, cpath)) - - # Make sure we're not linking the root image as a child. - if cpath == misc.liveroot(): - raise apx.LinkedImageException( - attach_root_as_child=cpath) - - # Make sure our current image is at it's default path. (We - # don't allow attaching new images if an image is located at - # an alternate path.) - if self.inaltroot(): - raise apx.LinkedImageException(attach_with_curpath=( - self.path(), self.current_path())) - - def abort_if_imgdir(d): - """Raise an exception if directory 'd' contains an - image.""" - try: - tmp = ar.ar_img_prefix(d) - except OSError as e: - # W0212 Access to a protected member - # pylint: disable=W0212 - raise apx._convert_error(e) - if tmp: - raise apx.LinkedImageException( - intermediate_image=(ppath, cpath, d)) - - # Find the common parent directory of the both parent and the - # child image. - dir_common = os.sep - pdirs = ppath.split(os.sep)[1:-1] - cdirs = cpath.split(os.sep)[1:-1] - for pdir, cdir in zip(pdirs, cdirs): - if pdir != cdir: - break - dir_common = os.path.join(dir_common, pdir) - dir_common = dir_common.rstrip(os.sep) + os.sep - - # Test the common parent. - if ppath != dir_common and cpath != dir_common: - abort_if_imgdir(dir_common) - - # First check the parent directories of the child. - d = os.path.dirname(cpath.rstrip(os.sep)) + os.sep - while len(d) > len(dir_common): - abort_if_imgdir(d) - d = os.path.dirname(d.rstrip(os.sep)) - if d != os.sep: - d += os.sep - - # Then check the parent directories of the parent. - d = os.path.dirname(ppath.rstrip(os.sep)) + os.sep - while len(d) > len(dir_common): - abort_if_imgdir(d) - d = os.path.dirname(d.rstrip(os.sep)) - if d != os.sep: - d += os.sep - - def attach_child(self, lin, path, props, - accept=False, allow_relink=False, force=False, li_md_only=False, - li_pkg_updates=True, noexecute=False, - progtrack=None, refresh_catalogs=True, reject_list=misc.EmptyI, - show_licenses=False, update_index=True): - """Attach an image as a child to the current image (the - current image will become a parent image. This operation - results in attempting to sync the child image with the parent - image. 
- - For descriptions of parameters please see the descriptions in - api.py`gen_plan_*""" - - assert type(lin) == LinkedImageName - assert type(path) == str - assert props is None or type(props) == dict, \ - "type(props) == {0}".format(type(props)) - if props is None: - props = dict() - - lip = self.__plugins[lin.lin_type] - if not lip.support_attach and not force: - e = apx.LinkedImageException( - attach_child_notsup=lin.lin_type) - return LI_RVTuple(e.lix_exitrv, e, None) - - # Path must be an absolute path. - if not os.path.isabs(path): - e = apx.LinkedImageException(child_path_notabs=path) - return LI_RVTuple(e.lix_exitrv, e, None) - - # cleanup specified path - cwd = os.getcwd() - try: - os.chdir(path) - except OSError as e: - e = apx.LinkedImageException(lin=lin, - child_op_failed=("access", path, e)) - return LI_RVTuple(e.lix_exitrv, e, None) - path = os.getcwd() - os.chdir(cwd) - - # if the current image isn't linked yet then we need to - # generate some linked image properties for ourselves - if PROP_PATH not in self.__props: - p_props = self.__fabricate_parent_props() - self.__update_props(p_props) - - # sanity check the input - try: - self.__validate_child_attach(lin, path, props, - allow_relink=allow_relink) - except apx.LinkedImageException as e: - return LI_RVTuple(e.lix_exitrv, e, None) + # path must be an image + try: + img_prefix = ar.ar_img_prefix(path) + except OSError as e: + raise apx.LinkedImageException( + lin=lin, child_op_failed=("find", path, e) + ) + if not img_prefix: + raise apx.LinkedImageException(child_bad_img=path) + + # Does the parent image (ourselves) reside in clonable BE? + # Unused variable 'be_uuid'; pylint: disable=W0612 + (be_name, be_uuid) = bootenv.BootEnv.get_be_name(self.__root) + # pylint: enable=W0612 + img_is_clonable = bool(be_name) + + # If the parent image is clonable then the new child image + # must be nested within the parents filesystem namespace. + path = path.rstrip(os.sep) + os.sep + p_root = self.__root.rstrip(os.sep) + os.sep + if img_is_clonable and not path.startswith(p_root): + raise apx.LinkedImageException(child_not_nested=(path, p_root)) - # make a copy of the options and start updating them - child_props = props.copy() - child_props[PROP_NAME] = lin - child_props[PROP_MODEL] = PV_MODEL_PUSH + # Child image should not already be linked + img_li_data_props = os.path.join(img_prefix, PATH_PROP) + try: + exists = ar.ar_exists(path, img_li_data_props) + except OSError as e: + # W0212 Access to a protected member + # pylint: disable=W0212 + raise apx._convert_error(e) + if exists and not allow_relink: + raise apx.LinkedImageException(img_linked=path) + + self.__validate_attach_img_paths(p_root, path) + + def __validate_attach_img_paths(self, ppath, cpath): + """Make sure there are no additional images in between the + parent and the child. For example, this prevents linking of + images if one of the images is nested within another unrelated + image. This is done by looking at all the parent directories + for both the parent and the child image until we reach a + common ancestor.""" + + # Make sure each path has a trailing '/'. + ppath = ppath.rstrip(os.sep) + os.sep + cpath = cpath.rstrip(os.sep) + os.sep + + # Make sure we're not linking to ourselves. + if ppath == cpath: + raise apx.LinkedImageException(link_to_self=ppath) + + # The parent image can't be nested nested within child. 
+ if ppath.startswith(cpath): + raise apx.LinkedImageException(parent_nested=(ppath, cpath)) + + # Make sure we're not linking the root image as a child. + if cpath == misc.liveroot(): + raise apx.LinkedImageException(attach_root_as_child=cpath) + + # Make sure our current image is at it's default path. (We + # don't allow attaching new images if an image is located at + # an alternate path.) + if self.inaltroot(): + raise apx.LinkedImageException( + attach_with_curpath=(self.path(), self.current_path()) + ) + + def abort_if_imgdir(d): + """Raise an exception if directory 'd' contains an + image.""" + try: + tmp = ar.ar_img_prefix(d) + except OSError as e: + # W0212 Access to a protected member + # pylint: disable=W0212 + raise apx._convert_error(e) + if tmp: + raise apx.LinkedImageException( + intermediate_image=(ppath, cpath, d) + ) + + # Find the common parent directory of the both parent and the + # child image. + dir_common = os.sep + pdirs = ppath.split(os.sep)[1:-1] + cdirs = cpath.split(os.sep)[1:-1] + for pdir, cdir in zip(pdirs, cdirs): + if pdir != cdir: + break + dir_common = os.path.join(dir_common, pdir) + dir_common = dir_common.rstrip(os.sep) + os.sep + + # Test the common parent. + if ppath != dir_common and cpath != dir_common: + abort_if_imgdir(dir_common) + + # First check the parent directories of the child. + d = os.path.dirname(cpath.rstrip(os.sep)) + os.sep + while len(d) > len(dir_common): + abort_if_imgdir(d) + d = os.path.dirname(d.rstrip(os.sep)) + if d != os.sep: + d += os.sep + + # Then check the parent directories of the parent. + d = os.path.dirname(ppath.rstrip(os.sep)) + os.sep + while len(d) > len(dir_common): + abort_if_imgdir(d) + d = os.path.dirname(d.rstrip(os.sep)) + if d != os.sep: + d += os.sep + + def attach_child( + self, + lin, + path, + props, + accept=False, + allow_relink=False, + force=False, + li_md_only=False, + li_pkg_updates=True, + noexecute=False, + progtrack=None, + refresh_catalogs=True, + reject_list=misc.EmptyI, + show_licenses=False, + update_index=True, + ): + """Attach an image as a child to the current image (the + current image will become a parent image. This operation + results in attempting to sync the child image with the parent + image. + + For descriptions of parameters please see the descriptions in + api.py`gen_plan_*""" + + assert type(lin) == LinkedImageName + assert type(path) == str + assert ( + props is None or type(props) == dict + ), "type(props) == {0}".format(type(props)) + if props is None: + props = dict() + + lip = self.__plugins[lin.lin_type] + if not lip.support_attach and not force: + e = apx.LinkedImageException(attach_child_notsup=lin.lin_type) + return LI_RVTuple(e.lix_exitrv, e, None) + + # Path must be an absolute path. 
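For reference, the intermediate-image walk performed by __validate_attach_img_paths above (find the common ancestor of the two image paths, then visit every directory between each image and that ancestor) reduces to the standalone sketch below. The helper name and sample paths are illustrative only and are not part of the patch; the trailing-slash conventions mirror the code above.

import os

def intermediate_dirs(ppath, cpath):
    """Yield every directory that would be checked for an unrelated
    image between two linked images and their common ancestor."""
    ppath = ppath.rstrip(os.sep) + os.sep
    cpath = cpath.rstrip(os.sep) + os.sep

    # find the common ancestor, as in the loop above
    common = os.sep
    for pdir, cdir in zip(ppath.split(os.sep)[1:-1],
                          cpath.split(os.sep)[1:-1]):
        if pdir != cdir:
            break
        common = os.path.join(common, pdir)
    common = common.rstrip(os.sep) + os.sep

    # the ancestor itself is only checked when it is neither image
    if ppath != common and cpath != common:
        yield common

    # then every directory between each image and the ancestor
    for start in (cpath, ppath):
        d = os.path.dirname(start.rstrip(os.sep)) + os.sep
        while len(d) > len(common):
            yield d
            d = os.path.dirname(d.rstrip(os.sep))
            if d != os.sep:
                d += os.sep

print(list(intermediate_dirs("/a/b/parent", "/a/b/parent/x/child")))
# ['/a/b/parent/x/']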
+ if not os.path.isabs(path): + e = apx.LinkedImageException(child_path_notabs=path) + return LI_RVTuple(e.lix_exitrv, e, None) + + # cleanup specified path + cwd = os.getcwd() + try: + os.chdir(path) + except OSError as e: + e = apx.LinkedImageException( + lin=lin, child_op_failed=("access", path, e) + ) + return LI_RVTuple(e.lix_exitrv, e, None) + path = os.getcwd() + os.chdir(cwd) + + # if the current image isn't linked yet then we need to + # generate some linked image properties for ourselves + if PROP_PATH not in self.__props: + p_props = self.__fabricate_parent_props() + self.__update_props(p_props) + + # sanity check the input + try: + self.__validate_child_attach( + lin, path, props, allow_relink=allow_relink + ) + except apx.LinkedImageException as e: + return LI_RVTuple(e.lix_exitrv, e, None) + + # make a copy of the options and start updating them + child_props = props.copy() + child_props[PROP_NAME] = lin + child_props[PROP_MODEL] = PV_MODEL_PUSH + + # set path related properties + self.set_path_transform( + child_props, self.get_path_transform(), current_path=path + ) + + # fill in any missing defaults options + for k, v in six.iteritems(lip.attach_props_def): + if k not in child_props: + child_props[k] = v + + # attach the child in memory + lip.attach_child_inmemory(child_props, allow_relink) + + if noexecute and li_md_only: + # we've validated parameters, nothing else to do + return LI_RVTuple(pkgdefs.EXIT_OK, None, None) + + # update the child + try: + lic = LinkedImageChild(self, lin) + except apx.LinkedImageException as e: + return LI_RVTuple(e.lix_exitrv, e, None) + + rvdict = {} + list( + self.__children_op( + _pkg_op=pkgdefs.PKG_OP_SYNC, + _lic_list=[lic], + _rvdict=rvdict, + _progtrack=progtrack, + _failfast=False, + _expect_plan=True, + _syncmd_tmp=True, + accept=accept, + li_md_only=li_md_only, + li_pkg_updates=li_pkg_updates, + noexecute=noexecute, + refresh_catalogs=refresh_catalogs, + reject_list=reject_list, + show_licenses=show_licenses, + update_index=update_index, + ) + ) + + rvtuple = rvdict[lin] + + if noexecute or rvtuple.rvt_rv not in [ + pkgdefs.EXIT_OK, + pkgdefs.EXIT_NOP, + ]: + return rvtuple + + # commit child image property updates + rvtuple2 = lip.sync_children_todisk() + _li_rvtuple_check(rvtuple2) + if rvtuple2.rvt_e: + return rvtuple2 + + # save parent image properties + self.syncmd() + + # The recursive child operation may have returned NOP, but + # since we always update our own image metadata, we always + # return OK. 
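attach_child() reports its result through the same (exit code, exception, plan) tuple used throughout this module, and deliberately maps a child-side EXIT_NOP back to EXIT_OK because the parent's own metadata was still rewritten. A minimal sketch of that convention, with assumed exit-code values and a stand-in for LI_RVTuple:

from collections import namedtuple

EXIT_OK, EXIT_NOP = 0, 4          # assumed values, for illustration only
LI_RVTuple = namedtuple("LI_RVTuple", "rvt_rv rvt_e rvt_p_dict")

def normalize_attach_result(child_rv):
    if child_rv.rvt_rv == EXIT_NOP:
        # the parent metadata was still updated, so the overall
        # attach did do something
        return LI_RVTuple(EXIT_OK, None, None)
    return child_rv

print(normalize_attach_result(LI_RVTuple(EXIT_NOP, None, None)))
# LI_RVTuple(rvt_rv=0, rvt_e=None, rvt_p_dict=None)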
+ if rvtuple.rvt_rv == pkgdefs.EXIT_NOP: + return LI_RVTuple(pkgdefs.EXIT_OK, None, None) + return rvtuple - # set path related properties - self.set_path_transform(child_props, - self.get_path_transform(), current_path=path) + def audit_children(self, lin_list): + """Audit one or more children of the current image to see if + they are in sync with this image.""" + + if lin_list == []: + lin_list = None + + lic_dict, rvdict = self.__children_init( + lin_list=lin_list, failfast=False + ) + + list( + self.__children_op( + _pkg_op=pkgdefs.PKG_OP_AUDIT_LINKED, + _lic_list=list(lic_dict.values()), + _rvdict=rvdict, + _progtrack=progress.QuietProgressTracker(), + _failfast=False, + ) + ) + return rvdict - # fill in any missing defaults options - for k, v in six.iteritems(lip.attach_props_def): - if k not in child_props: - child_props[k] = v + def sync_children( + self, + lin_list, + accept=False, + li_md_only=False, + li_pkg_updates=True, + progtrack=None, + noexecute=False, + refresh_catalogs=True, + reject_list=misc.EmptyI, + show_licenses=False, + update_index=True, + ): + """Sync one or more children of the current image.""" + + if progtrack is None: + progtrack = progress.NullProgressTracker() + + if lin_list == []: + lin_list = None + + lic_dict = self.__children_init(lin_list=lin_list) + + _syncmd_tmp = True + if not noexecute and li_md_only: + _syncmd_tmp = False + + rvdict = {} + list( + self.__children_op( + _pkg_op=pkgdefs.PKG_OP_SYNC, + _lic_list=list(lic_dict.values()), + _rvdict=rvdict, + _progtrack=progtrack, + _failfast=False, + _expect_plan=True, + _syncmd_tmp=_syncmd_tmp, + accept=accept, + li_md_only=li_md_only, + li_pkg_updates=li_pkg_updates, + noexecute=noexecute, + refresh_catalogs=refresh_catalogs, + reject_list=reject_list, + show_licenses=show_licenses, + update_index=update_index, + ) + ) + return rvdict - # attach the child in memory - lip.attach_child_inmemory(child_props, allow_relink) + def detach_children( + self, + lin_list, + force=False, + noexecute=False, + li_md_only=False, + li_pkg_updates=True, + ): + """Detach one or more children from the current image. This + operation results in the removal of any constraint package + from the child images.""" + + if lin_list == []: + lin_list = None + + lic_dict, rvdict = self.__children_init( + lin_list=lin_list, failfast=False + ) + + # check if we support detach for these children. we don't use + # iteritems() when walking lic_dict because we might modify + # lic_dict. + for lin in lic_dict: + lip = self.__plugins[lin.lin_type] + if lip.support_detach or force: + continue + + # we can't detach this type of image. + e = apx.LinkedImageException(detach_child_notsup=lin.lin_type) + rvdict[lin] = LI_RVTuple(e.lix_exitrv, e, None) + _li_rvtuple_check(rvdict[lin]) + del lic_dict[lin] + + # do the detach + list( + self.__children_op( + _pkg_op=pkgdefs.PKG_OP_DETACH, + _lic_list=list(lic_dict.values()), + _rvdict=rvdict, + _progtrack=progress.NullProgressTracker(), + _failfast=False, + li_md_only=li_md_only, + li_pkg_updates=li_pkg_updates, + noexecute=noexecute, + ) + ) + + # if any of the children successfully detached, then we want + # to discard our metadata for that child. 
+ for lin, rvtuple in six.iteritems(rvdict): + # if the detach failed leave metadata in parent + if rvtuple.rvt_e and not force: + continue + + # detach the child in memory + lip = self.__plugins[lin.lin_type] + lip.detach_child_inmemory(lin) + + if noexecute: + continue + + # commit child image property updates + rvtuple2 = lip.sync_children_todisk() + _li_rvtuple_check(rvtuple2) + + # don't overwrite previous errors + if rvtuple2.rvt_e and rvtuple.rvt_e is None: + rvdict[lin] = rvtuple2 + + if not (self.ischild() or self.isparent()): + # we're not linked anymore, so delete all our linked + # properties. + self.__update_props() + self.syncmd() - if noexecute and li_md_only: - # we've validated parameters, nothing else to do - return LI_RVTuple(pkgdefs.EXIT_OK, None, None) + return rvdict - # update the child + def __children_op( + self, + _pkg_op, + _lic_list, + _rvdict, + _progtrack, + _failfast, + _expect_plan=False, + _ignore_syncmd_nop=True, + _syncmd_tmp=False, + _pd=None, + **kwargs, + ): + """Wrapper for __children_op_vec() to stay compatible with old + callers which only support one operation for all linked images. + + '_pkg_op' is the pkg.1 operation that we're going to perform + + '_lic_list' is a list of linked image child objects to perform + the operation on. + + '_ignore_syncmd_nop' a boolean that indicates if we should + always recurse into a child even if the linked image meta data + isn't changing. + + See __children_op_vec() for an explanation of the remaining + options.""" + + for p_dict in self.__children_op_vec( + _lic_op_vectors=[(_pkg_op, _lic_list, kwargs, _ignore_syncmd_nop)], + _rvdict=_rvdict, + _progtrack=_progtrack, + _failfast=_failfast, + _expect_plan=_expect_plan, + _syncmd_tmp=_syncmd_tmp, + _pd=_pd, + stage=pkgdefs.API_STAGE_DEFAULT, + ): + yield p_dict + + def __children_op_vec( + self, + _lic_op_vectors, + _rvdict, + _progtrack, + _failfast, + _expect_plan=False, + _syncmd_tmp=False, + _pd=None, + stage=pkgdefs.API_STAGE_DEFAULT, + ): + """An iterator function which performs a linked image + operation on multiple children in parallel. + + '_lic_op_vectors' is a list of tuples containing the operation + to perform, the list of linked images the operation is to be + performed on, the kwargs for this operation and if the metadata + sync nop should be ignored in the following form: + [(pkg_op, lin_list, kwargs, ignore_syncmd_nop), ...] + + '_rvdict' is a dictionary, indexed by linked image name, which + contains rvtuples of the result of the operation for each + child. + + '_prograck' is a ProgressTracker pointer. + + '_failfast' is a boolean. If True and we encounter a failure + operating on a child then we raise an exception immediately. + If False then we'll attempt to perform the operation on all + children and rvdict will contain a LI_RVTuple result for all + children. + + '_expect_plan' is a boolean that indicates if we expect this + operation to generate an image plan. + + '_syncmd_tmp' a boolean that indicates if we should write + linked image metadata in a temporary location in child images, + or just overwrite any existing data. + + '_pd' a PlanDescription pointer.""" + + lic_all = reduce(operator.add, [i[1] for i in _lic_op_vectors], []) + lic_num = len(lic_all) + + # make sure we don't have any duplicate LICs or duplicate LINs + assert lic_num == len(set(lic_all)) + assert lic_num == len(set([i.child_name for i in lic_all])) + + # At the moment the PT doesn't seem to really use the operation + # type for display reasons. 
It only uses it to treat pubcheck + # differently. Therefore it should be sufficient to skip the + # operation type in case we have different operations going on + # at the same time. + # Additionally, if the operation is the same for all children + # we can use some optimizations. + concurrency = global_settings.client_concurrency + if len(_lic_op_vectors) == 1: + pkg_op = _lic_op_vectors[0][0] + + if pkg_op in [ + pkgdefs.PKG_OP_AUDIT_LINKED, + pkgdefs.PKG_OP_PUBCHECK, + pkgdefs.PKG_OP_HOTFIX_CLEANUP, + ]: + # These operations are cheap so ideally we'd + # like to use full parallelism. But if the user + # specified a concurrency limit we should + # respect that. + if not global_settings.client_concurrency_set: + # No limit was specified, use full + # concurrency. + concurrency = -1 + else: + pkg_op = "" + + if lic_num: + _progtrack.li_recurse_start(pkg_op, lic_num) + + # If we have a plan for the current image that means linked + # image metadata is probably changing so we always save it to + # a temporary file (and we don't overwrite the existing + # metadata until after we execute the plan). + if _pd is not None: + _syncmd_tmp = True + + lic_setup = [] + for pkg_op, lic_list, kwargs, ignore_syncmd_nop in _lic_op_vectors: + if stage != pkgdefs.API_STAGE_DEFAULT: + kwargs = kwargs.copy() + kwargs["stage"] = stage + + # get parent metadata common to all child images + _pmd = None + if pkg_op != pkgdefs.PKG_OP_DETACH: + ppubs = get_pubs(self.__img) + ppkgs = get_packages(self.__img, pd=_pd) + pfacets = get_inheritable_facets(self.__img, pd=_pd) + _pmd = (ppubs, ppkgs, pfacets) + + # setup operation for each child + for lic in lic_list: try: - lic = LinkedImageChild(self, lin) + lic.child_op_setup( + pkg_op, + _pmd, + _progtrack, + ignore_syncmd_nop, + _syncmd_tmp, + **kwargs, + ) + lic_setup.append(lic) except apx.LinkedImageException as e: - return LI_RVTuple(e.lix_exitrv, e, None) - - rvdict = {} - list(self.__children_op( - _pkg_op=pkgdefs.PKG_OP_SYNC, - _lic_list=[lic], - _rvdict=rvdict, - _progtrack=progtrack, - _failfast=False, - _expect_plan=True, - _syncmd_tmp=True, - accept=accept, - li_md_only=li_md_only, - li_pkg_updates=li_pkg_updates, - noexecute=noexecute, - refresh_catalogs=refresh_catalogs, - reject_list=reject_list, - show_licenses=show_licenses, - update_index=update_index)) - - rvtuple = rvdict[lin] - - if noexecute or rvtuple.rvt_rv not in [ - pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP ]: - return rvtuple - - # commit child image property updates - rvtuple2 = lip.sync_children_todisk() - _li_rvtuple_check(rvtuple2) - if rvtuple2.rvt_e: - return rvtuple2 - - # save parent image properties - self.syncmd() - - # The recursive child operation may have returned NOP, but - # since we always update our own image metadata, we always - # return OK. 
- if rvtuple.rvt_rv == pkgdefs.EXIT_NOP: - return LI_RVTuple(pkgdefs.EXIT_OK, None, None) - return rvtuple - - def audit_children(self, lin_list): - """Audit one or more children of the current image to see if - they are in sync with this image.""" - - if lin_list == []: - lin_list = None - - lic_dict, rvdict = self.__children_init(lin_list=lin_list, - failfast=False) - - list(self.__children_op( - _pkg_op=pkgdefs.PKG_OP_AUDIT_LINKED, - _lic_list=list(lic_dict.values()), - _rvdict=rvdict, - _progtrack=progress.QuietProgressTracker(), - _failfast=False)) - return rvdict - - def sync_children(self, lin_list, accept=False, - li_md_only=False, li_pkg_updates=True, progtrack=None, - noexecute=False, refresh_catalogs=True, reject_list=misc.EmptyI, - show_licenses=False, update_index=True): - """Sync one or more children of the current image.""" - - if progtrack is None: - progtrack = progress.NullProgressTracker() - - if lin_list == []: - lin_list = None - - lic_dict = self.__children_init(lin_list=lin_list) - - _syncmd_tmp = True - if not noexecute and li_md_only: - _syncmd_tmp = False - - rvdict = {} - list(self.__children_op( - _pkg_op=pkgdefs.PKG_OP_SYNC, - _lic_list=list(lic_dict.values()), - _rvdict=rvdict, - _progtrack=progtrack, - _failfast=False, - _expect_plan=True, - _syncmd_tmp=_syncmd_tmp, - accept=accept, - li_md_only=li_md_only, - li_pkg_updates=li_pkg_updates, - noexecute=noexecute, - refresh_catalogs=refresh_catalogs, - reject_list=reject_list, - show_licenses=show_licenses, - update_index=update_index)) - return rvdict - - def detach_children(self, lin_list, force=False, noexecute=False, - li_md_only=False, li_pkg_updates=True): - """Detach one or more children from the current image. This - operation results in the removal of any constraint package - from the child images.""" - - if lin_list == []: - lin_list = None - - lic_dict, rvdict = self.__children_init(lin_list=lin_list, - failfast=False) - - # check if we support detach for these children. we don't use - # iteritems() when walking lic_dict because we might modify - # lic_dict. - for lin in lic_dict: - lip = self.__plugins[lin.lin_type] - if lip.support_detach or force: - continue - - # we can't detach this type of image. - e = apx.LinkedImageException( - detach_child_notsup=lin.lin_type) - rvdict[lin] = LI_RVTuple(e.lix_exitrv, e, None) - _li_rvtuple_check(rvdict[lin]) - del lic_dict[lin] - - # do the detach - list(self.__children_op( - _pkg_op=pkgdefs.PKG_OP_DETACH, - _lic_list=list(lic_dict.values()), - _rvdict=rvdict, - _progtrack=progress.NullProgressTracker(), - _failfast=False, - li_md_only=li_md_only, - li_pkg_updates=li_pkg_updates, - noexecute=noexecute)) - - # if any of the children successfully detached, then we want - # to discard our metadata for that child. - for lin, rvtuple in six.iteritems(rvdict): - - # if the detach failed leave metadata in parent - if rvtuple.rvt_e and not force: - continue - - # detach the child in memory - lip = self.__plugins[lin.lin_type] - lip.detach_child_inmemory(lin) - - if noexecute: - continue - - # commit child image property updates - rvtuple2 = lip.sync_children_todisk() - _li_rvtuple_check(rvtuple2) - - # don't overwrite previous errors - if rvtuple2.rvt_e and rvtuple.rvt_e is None: - rvdict[lin] = rvtuple2 - - if not (self.ischild() or self.isparent()): - # we're not linked anymore, so delete all our linked - # properties. 
- self.__update_props() - self.syncmd() - - return rvdict - - def __children_op(self, _pkg_op, _lic_list, _rvdict, _progtrack, - _failfast, _expect_plan=False, _ignore_syncmd_nop=True, - _syncmd_tmp=False, _pd=None, **kwargs): - """Wrapper for __children_op_vec() to stay compatible with old - callers which only support one operation for all linked images. - - '_pkg_op' is the pkg.1 operation that we're going to perform - - '_lic_list' is a list of linked image child objects to perform - the operation on. - - '_ignore_syncmd_nop' a boolean that indicates if we should - always recurse into a child even if the linked image meta data - isn't changing. - - See __children_op_vec() for an explanation of the remaining - options.""" - - for p_dict in self.__children_op_vec( - _lic_op_vectors=[(_pkg_op, _lic_list, kwargs, - _ignore_syncmd_nop)], - _rvdict=_rvdict, - _progtrack=_progtrack, - _failfast=_failfast, - _expect_plan=_expect_plan, - _syncmd_tmp=_syncmd_tmp, - _pd=_pd, - stage=pkgdefs.API_STAGE_DEFAULT - ): - yield p_dict - - def __children_op_vec(self, _lic_op_vectors, _rvdict, _progtrack, - _failfast, _expect_plan=False, _syncmd_tmp=False, _pd=None, - stage=pkgdefs.API_STAGE_DEFAULT): - """An iterator function which performs a linked image - operation on multiple children in parallel. - - '_lic_op_vectors' is a list of tuples containing the operation - to perform, the list of linked images the operation is to be - performed on, the kwargs for this operation and if the metadata - sync nop should be ignored in the following form: - [(pkg_op, lin_list, kwargs, ignore_syncmd_nop), ...] - - '_rvdict' is a dictionary, indexed by linked image name, which - contains rvtuples of the result of the operation for each - child. - - '_prograck' is a ProgressTracker pointer. - - '_failfast' is a boolean. If True and we encounter a failure - operating on a child then we raise an exception immediately. - If False then we'll attempt to perform the operation on all - children and rvdict will contain a LI_RVTuple result for all - children. - - '_expect_plan' is a boolean that indicates if we expect this - operation to generate an image plan. - - '_syncmd_tmp' a boolean that indicates if we should write - linked image metadata in a temporary location in child images, - or just overwrite any existing data. - - '_pd' a PlanDescription pointer.""" - - - lic_all = reduce(operator.add, - [i[1] for i in _lic_op_vectors], []) - lic_num = len(lic_all) - - # make sure we don't have any duplicate LICs or duplicate LINs - assert lic_num == len(set(lic_all)) - assert lic_num == len(set([i.child_name for i in lic_all])) - - # At the moment the PT doesn't seem to really use the operation - # type for display reasons. It only uses it to treat pubcheck - # differently. Therefore it should be sufficient to skip the - # operation type in case we have different operations going on - # at the same time. - # Additionally, if the operation is the same for all children - # we can use some optimizations. - concurrency = global_settings.client_concurrency - if len(_lic_op_vectors) == 1: - pkg_op = _lic_op_vectors[0][0] - - if pkg_op in [ pkgdefs.PKG_OP_AUDIT_LINKED, - pkgdefs.PKG_OP_PUBCHECK, - pkgdefs.PKG_OP_HOTFIX_CLEANUP ]: - # These operations are cheap so ideally we'd - # like to use full parallelism. But if the user - # specified a concurrency limit we should - # respect that. - if not global_settings.client_concurrency_set: - # No limit was specified, use full - # concurrency. 
- concurrency = -1 - else: - pkg_op = "" - - if lic_num: - _progtrack.li_recurse_start(pkg_op, lic_num) - - # If we have a plan for the current image that means linked - # image metadata is probably changing so we always save it to - # a temporary file (and we don't overwrite the existing - # metadata until after we execute the plan). - if _pd is not None: - _syncmd_tmp = True - - lic_setup = [] - for pkg_op, lic_list, kwargs, ignore_syncmd_nop in \ - _lic_op_vectors: - - if stage != pkgdefs.API_STAGE_DEFAULT: - kwargs = kwargs.copy() - kwargs["stage"] = stage - - # get parent metadata common to all child images - _pmd = None - if pkg_op != pkgdefs.PKG_OP_DETACH: - ppubs = get_pubs(self.__img) - ppkgs = get_packages(self.__img, pd=_pd) - pfacets = get_inheritable_facets(self.__img, - pd=_pd) - _pmd = (ppubs, ppkgs, pfacets) - - # setup operation for each child - for lic in lic_list: - try: - lic.child_op_setup(pkg_op, _pmd, - _progtrack, ignore_syncmd_nop, - _syncmd_tmp, **kwargs) - lic_setup.append(lic) - except apx.LinkedImageException as e: - _rvdict[lic.child_name] = \ - LI_RVTuple(e.lix_exitrv, e, None) - - # if _failfast is true, then throw an exception if we failed - # to setup any of the children. if _failfast is false we'll - # continue to perform the operation on any children that - # successfully initialized and we'll report setup errors along - # with the final results for all children. - if _failfast and _li_rvdict_exceptions(_rvdict): - # before we raise an exception we need to cleanup any - # children that we setup. - for lic in lic_setup: - lic.child_op_abort() - # raise an exception - _li_rvdict_raise_exceptions(_rvdict) - - def __child_op_finish(lic, lic_list, _rvdict, - _progtrack, _failfast, _expect_plan): - """An iterator function invoked when a child has - finished an operation. - - 'lic' is the child that has finished execution. - - 'lic_list' a list of children to remove 'lic' from. - - See __children_op() for an explanation of the other - parameters.""" - - assert lic.child_op_is_done() - - lic_list.remove(lic) - - rvtuple, stdout, stderr = lic.child_op_rv(_expect_plan) - _li_rvtuple_check(rvtuple) - _rvdict[lic.child_name] = rvtuple - - # check if we should raise an exception - if _failfast and _li_rvdict_exceptions(_rvdict): - - # we're going to raise an exception. abort - # the remaining children. - for lic in lic_list: - lic.child_op_abort() - - # raise an exception - _li_rvdict_raise_exceptions(_rvdict) - - if rvtuple.rvt_rv in [ pkgdefs.EXIT_OK, - pkgdefs.EXIT_NOP ]: - - # only display child output if there was no - # error (otherwise the exception includes the - # output so we'll display it twice.) - _progtrack.li_recurse_output(lic.child_name, - stdout, stderr) - - # check if we should yield a plan. - if _expect_plan and rvtuple.rvt_rv == pkgdefs.EXIT_OK: - yield rvtuple.rvt_p_dict - - # check if we did everything we needed to do during child - # setup. (this can happen if we're just doing an implicit - # syncmd during setup we discover the linked image metadata - # isn't changing.) we iterate over a copy of lic_setup to - # allow __child_op_finish() to remove elements from lic_setup - # while we're walking through it. 
- for lic in copy.copy(lic_setup): - if not lic.child_op_is_done(): - continue - for p_dict in __child_op_finish(lic, lic_setup, - _rvdict, _progtrack, _failfast, - _expect_plan): - yield p_dict - - # keep track of currently running children - lic_running = [] - - # keep going as long as there are children to process + _rvdict[lic.child_name] = LI_RVTuple(e.lix_exitrv, e, None) + + # if _failfast is true, then throw an exception if we failed + # to setup any of the children. if _failfast is false we'll + # continue to perform the operation on any children that + # successfully initialized and we'll report setup errors along + # with the final results for all children. + if _failfast and _li_rvdict_exceptions(_rvdict): + # before we raise an exception we need to cleanup any + # children that we setup. + for lic in lic_setup: + lic.child_op_abort() + # raise an exception + _li_rvdict_raise_exceptions(_rvdict) + + def __child_op_finish( + lic, lic_list, _rvdict, _progtrack, _failfast, _expect_plan + ): + """An iterator function invoked when a child has + finished an operation. + + 'lic' is the child that has finished execution. + + 'lic_list' a list of children to remove 'lic' from. + + See __children_op() for an explanation of the other + parameters.""" + + assert lic.child_op_is_done() + + lic_list.remove(lic) + + rvtuple, stdout, stderr = lic.child_op_rv(_expect_plan) + _li_rvtuple_check(rvtuple) + _rvdict[lic.child_name] = rvtuple + + # check if we should raise an exception + if _failfast and _li_rvdict_exceptions(_rvdict): + # we're going to raise an exception. abort + # the remaining children. + for lic in lic_list: + lic.child_op_abort() + + # raise an exception + _li_rvdict_raise_exceptions(_rvdict) + + if rvtuple.rvt_rv in [pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP]: + # only display child output if there was no + # error (otherwise the exception includes the + # output so we'll display it twice.) + _progtrack.li_recurse_output(lic.child_name, stdout, stderr) + + # check if we should yield a plan. + if _expect_plan and rvtuple.rvt_rv == pkgdefs.EXIT_OK: + yield rvtuple.rvt_p_dict + + # check if we did everything we needed to do during child + # setup. (this can happen if we're just doing an implicit + # syncmd during setup we discover the linked image metadata + # isn't changing.) we iterate over a copy of lic_setup to + # allow __child_op_finish() to remove elements from lic_setup + # while we're walking through it. 
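The dispatch loop reworked in this hunk starts children up to the configured concurrency and then uses select.poll() on their file descriptors to discover which child has produced output or finished. A simplified, self-contained version of that pattern, using subprocess commands as stand-ins for LinkedImageChild (the command strings and the reaping logic are illustrative only, not the pkg machinery):

import select
import subprocess

def run_children(cmds, concurrency):
    """Start each command as a child, at most `concurrency` at once
    (<= 0 means unlimited), and reap output as children finish."""
    pending = list(cmds)
    running = {}   # stdout fd -> Popen
    results = {}
    while pending or running:
        # start more children while slots are free
        while pending and (concurrency <= 0 or len(running) < concurrency):
            proc = subprocess.Popen(pending.pop(), shell=True,
                                    stdout=subprocess.PIPE)
            running[proc.stdout.fileno()] = proc

        # poll all running children and see which have pending output
        poller = select.poll()
        for fd in running:
            poller.register(fd, select.POLLIN)
        for fd, _event in poller.poll():
            proc = running.pop(fd)
            out = proc.stdout.read()
            results[proc.args] = (proc.wait(), out.decode().strip())
    return results

print(run_children(["echo one", "echo two", "echo three"], 2))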
+ for lic in copy.copy(lic_setup): + if not lic.child_op_is_done(): + continue + for p_dict in __child_op_finish( + lic, lic_setup, _rvdict, _progtrack, _failfast, _expect_plan + ): + yield p_dict + + # keep track of currently running children + lic_running = [] + + # keep going as long as there are children to process + progtrack_update = False + while len(lic_setup) or len(lic_running): + while lic_setup and ( + concurrency > len(lic_running) or concurrency <= 0 + ): + # start processing on a child + progtrack_update = True + lic = lic_setup.pop() + lic_running.append(lic) + lic.child_op_start() + + if progtrack_update: + # display progress on children progtrack_update = False - while len(lic_setup) or len(lic_running): - - while lic_setup and ( - concurrency > len(lic_running) or - concurrency <= 0): - # start processing on a child - progtrack_update = True - lic = lic_setup.pop() - lic_running.append(lic) - lic.child_op_start() - - if progtrack_update: - # display progress on children - progtrack_update = False - done = lic_num - len(lic_setup) - \ - len(lic_running) - lin_running = sorted([ - lic.child_name for lic in lic_running]) - _progtrack.li_recurse_status(lin_running, - done) - - # poll on all the linked image children and see which - # ones have pending output. - fd_hash = dict([ - (lic.fileno(), lic) - for lic in lic_running - ]) - p = select.poll() - for fd in fd_hash.keys(): - p.register(fd, select.POLLIN) - events = p.poll() - lic_list = [ fd_hash[event[0]] for event in events ] - - for lic in lic_list: - _progtrack.li_recurse_progress(lic.child_name) - if not lic.child_op_is_done(): - continue - # a child finished processing - progtrack_update = True - for p_dict in __child_op_finish(lic, - lic_running, _rvdict, _progtrack, - _failfast, _expect_plan): - yield p_dict - - _li_rvdict_check(_rvdict) - if lic_num: - _progtrack.li_recurse_end() - - def __children_init(self, lin_list=None, li_ignore=None, failfast=True): - """Initialize LinkedImageChild objects for children specified - in 'lin_list'. If 'lin_list' is not specified, then - initialize objects for all children (excluding any being - ignored via 'li_ignore').""" - - # you can't specify children to operate on and children to be - # ignored at the same time - assert lin_list is None or li_ignore is None - - # if no children we listed, build a list of children - if lin_list is None: - lin_list = [ - i[0] - for i in self.__list_children(li_ignore) - ] - else: - self.verify_names(lin_list) - - rvdict = {} - lic_dict = {} - for lin in lin_list: - try: - lic = LinkedImageChild(self, lin) - lic_dict[lin] = lic - except apx.LinkedImageException as e: - rvdict[lin] = LI_RVTuple(e.lix_exitrv, e, None) - - if failfast: - _li_rvdict_raise_exceptions(rvdict) - return lic_dict - - return (lic_dict, rvdict) - - def __recursion_init(self, li_ignore): - """Initialize child objects used during recursive packaging - operations.""" - - self.__lic_ignore = li_ignore - self.__lic_dict = self.__children_init(li_ignore=li_ignore) - - def api_recurse_init(self, li_ignore=None, repos=None): - """Initialize planning state. If we're a child image we save - our current state (which may reflect a planned state that we - have not committed to disk) into the plan. 
We also initialize - all our children to prepare to recurse into them.""" - - if PROP_RECURSE in self.__props and \ - not self.__props[PROP_RECURSE]: - # we don't want to recurse - self.__recursion_init(li_ignore=[]) - return - - # Initialize children - self.__recursion_init(li_ignore) - - if not self.__lic_dict: - # we don't need to recurse - return - - # if we have any children we don't support operations using - # temporary repositories. - if repos: - raise apx.PlanCreationException(no_tmp_origins=True) - - def api_recurse_pubcheck(self, progtrack): - """Do a recursive publisher check""" - - # get a list of of children to recurse into. - lic_list = list(self.__lic_dict.values()) - - # do a publisher check on all of them - rvdict = {} - list(self.__children_op( - _pkg_op=pkgdefs.PKG_OP_PUBCHECK, - _lic_list=lic_list, - _rvdict=rvdict, - _progtrack=progtrack, - _failfast=False)) - - # raise an exception if one or more children failed the - # publisher check. - _li_rvdict_raise_exceptions(rvdict) - - def api_recurse_hfo_cleanup(self, progtrack): - """Do a recursive hot-fix origin cleanup""" - - # get a list of of children to recurse into. - lic_list = list(self.__lic_dict.values()) - - rvdict = {} - list(self.__children_op( - _pkg_op=pkgdefs.PKG_OP_HOTFIX_CLEANUP, - _lic_list=lic_list, - _rvdict=rvdict, - _progtrack=progtrack, - _failfast=False)) - - # raise an exception if one or more children failed - _li_rvdict_raise_exceptions(rvdict) - - def __api_recurse(self, stage, progtrack): - """This is an iterator function. It recurses into linked - image children to perform the specified operation. - """ - - # get a pointer to the current image plan - pd = self.__img.imageplan.pd - - # get a list of of children to recurse into. - lic_list = list(self.__lic_dict.values()) - - # sanity check stage - assert stage in [pkgdefs.API_STAGE_PLAN, - pkgdefs.API_STAGE_PREPARE, pkgdefs.API_STAGE_EXECUTE] - - # if we're ignoring all children then we can't be recursing - assert pd.children_ignored != [] or lic_list == [] - - # sanity check the plan description state - if stage == pkgdefs.API_STAGE_PLAN: - # the state should be uninitialized - assert pd.children_planned == [] - assert pd.children_nop == [] - else: - # if we ignored all children, we better not have - # recursed into any children. - assert pd.children_ignored != [] or \ - pd.children_planned == pd.children_nop == [] - - # there shouldn't be any overlap between sets of - # children in the plan - assert not (set(pd.children_planned) & - set(pd.children_nop)) - if pd.children_ignored: - assert not (set(pd.children_ignored) & - set(pd.children_planned)) - assert not (set(pd.children_ignored) & - set(pd.children_nop)) - - # make sure set of child handles matches the set of - # previously planned children. - assert set(self.__lic_dict) == set(pd.children_planned) - - # if we're in the planning stage, we should pass the current - # image plan onto the child and also expect an image plan from - # the child. - expect_plan = False - if stage == pkgdefs.API_STAGE_PLAN: - expect_plan = True - - # Assemble list of LICs from LINs in pd.child_op_vectors and - # create new lic_op_vectors to pass to __children_op_vec(). 
- lic_op_vectors = [] - for op, lin_list, kwargs, ignore_syncmd_nop in \ - pd.child_op_vectors: - assert "stage" not in kwargs - lic_list = [] - for l in lin_list: - try: - lic_list.append(self.__lic_dict[l]) - except KeyError: - # For the prepare and execute phase we - # remove children for which there is - # nothing to do from self.__lic_dict. - # So ignore those we can't find. - pass - lic_op_vectors.append((op, lic_list, kwargs, - ignore_syncmd_nop)) - - rvdict = {} - for p_dict in self.__children_op_vec( - _lic_op_vectors=lic_op_vectors, - _rvdict=rvdict, - _progtrack=progtrack, - _failfast=True, - _expect_plan=expect_plan, - stage=stage, - _pd=pd): - yield p_dict - - assert not _li_rvdict_exceptions(rvdict) - - for lin in rvdict: - # check for children that don't need any updates - if rvdict[lin].rvt_rv == pkgdefs.EXIT_NOP: - assert lin not in pd.children_nop - pd.children_nop.append(lin) - del self.__lic_dict[lin] - - # record the children that are done planning - if stage == pkgdefs.API_STAGE_PLAN and \ - rvdict[lin].rvt_rv == pkgdefs.EXIT_OK: - assert lin not in pd.children_planned - pd.children_planned.append(lin) - - @staticmethod - def __recursion_ops(api_op): - """Determine what pkg command to use when recursing into child - images.""" - - # - # given the api operation being performed on the current - # image, figure out what api operation should be performed on - # child images. - # - # the recursion policy which hard coded here is that if we do - # an pkg update in the parent image without any packages - # specified (ie, we want to update everything) then when we - # recurse we'll also do an update of everything. but if we're - # doing any other operation like install, uninstall, an update - # of specific packages, etc, then when we recurse we'll do a - # sync in the child. - # - - - # To improve performance we assume the child is already in sync, - # so if its linked image metadata isn't changing then the child - # won't need any updates so there will be no need to recurse - # into it. - ignore_syncmd_nop = False - pkg_op_erecurse = None - - if api_op == pkgdefs.API_OP_SYNC: - pkg_op_irecurse = pkgdefs.PKG_OP_SYNC - # If we are doing an explicit sync, we do have to make - # sure we actually recurse into the child and sync - # metadata. - ignore_syncmd_nop = True - elif api_op == pkgdefs.API_OP_INSTALL: - pkg_op_irecurse = pkgdefs.PKG_OP_SYNC - pkg_op_erecurse = pkgdefs.PKG_OP_INSTALL - elif api_op == pkgdefs.API_OP_CHANGE_FACET: - pkg_op_irecurse = pkgdefs.PKG_OP_SYNC - pkg_op_erecurse = pkgdefs.PKG_OP_CHANGE_FACET - elif api_op == pkgdefs.API_OP_CHANGE_VARIANT: - pkg_op_irecurse = pkgdefs.PKG_OP_SYNC - pkg_op_erecurse = pkgdefs.PKG_OP_CHANGE_VARIANT - if api_op == pkgdefs.API_OP_UPDATE: - pkg_op_irecurse = pkgdefs.PKG_OP_SYNC - pkg_op_erecurse = pkgdefs.PKG_OP_UPDATE - elif api_op == pkgdefs.API_OP_UNINSTALL: - pkg_op_irecurse = pkgdefs.PKG_OP_SYNC - pkg_op_erecurse = pkgdefs.PKG_OP_UNINSTALL - else: - pkg_op_irecurse = pkgdefs.PKG_OP_SYNC - - return pkg_op_irecurse, pkg_op_erecurse, ignore_syncmd_nop - - @staticmethod - def __recursion_args(op, refresh_catalogs, update_index, api_kwargs): - """Determine what pkg command arguments to use when recursing - into child images.""" - - kwargs = {} - kwargs["noexecute"] = api_kwargs["noexecute"] - kwargs["refresh_catalogs"] = refresh_catalogs - kwargs["show_licenses"] = False - kwargs["update_index"] = update_index - - # - # when we recurse we always accept all new licenses (for now). 
- # - # ultimately (when start yielding back plan descriptions for - # children) in addition to accepting licenses on the plan for - # the current image the api client will also have to - # explicitly accept licenses for all child images. but until - # that happens we'll just assume that the parent image license - # space is a superset of the child image license space (and - # since the api consumer must accept licenses in the parent - # before we'll do anything, we'll assume licenses in the child - # are accepted as well). - # - kwargs["accept"] = True - - if "li_pkg_updates" in api_kwargs: - # option specific to: attach, set-property-linked, sync - kwargs["li_pkg_updates"] = api_kwargs["li_pkg_updates"] - - if op == pkgdefs.PKG_OP_INSTALL: - assert "pkgs_inst" in api_kwargs - # option specific to: install - kwargs["pkgs_inst"] = api_kwargs["pkgs_inst"] - kwargs["reject_list"] = api_kwargs["reject_list"] - elif op == pkgdefs.PKG_OP_CHANGE_VARIANT: - assert "variants" in api_kwargs - # option specific to: change-variant - kwargs["variants"] = api_kwargs["variants"] - kwargs["facets"] = None - kwargs["reject_list"] = api_kwargs["reject_list"] - elif op == pkgdefs.PKG_OP_CHANGE_FACET: - assert "facets" in api_kwargs - # option specific to: change-facet - kwargs["facets"] = api_kwargs["facets"] - kwargs["variants"] = None - kwargs["reject_list"] = api_kwargs["reject_list"] - elif op == pkgdefs.PKG_OP_UNINSTALL: - assert "pkgs_to_uninstall" in api_kwargs - # option specific to: uninstall - kwargs["pkgs_to_uninstall"] = \ - api_kwargs["pkgs_to_uninstall"] - del kwargs["show_licenses"] - del kwargs["refresh_catalogs"] - del kwargs["accept"] - elif op == pkgdefs.PKG_OP_UPDATE: - # skip ipkg up to date check for child images - kwargs["force"] = True - kwargs["pkgs_update"] = api_kwargs["pkgs_update"] - kwargs["reject_list"] = api_kwargs["reject_list"] - - return kwargs - - def api_recurse_plan(self, api_kwargs, erecurse_list, refresh_catalogs, - update_index, progtrack): - """Plan child image updates.""" - - pd = self.__img.imageplan.pd - api_op = pd.plan_type - - pd.child_op_vectors = [] - - # Get LinkedImageNames of all children - lin_list = list(self.__lic_dict.keys()) - - pkg_op_irecurse, pkg_op_erecurse, ignore_syncmd_nop = \ - self.__recursion_ops(api_op) - - # Prepare op vector for explicit recurse operations - if erecurse_list: - assert pkg_op_erecurse - # remove recurse children from sync list - lin_list = list(set(lin_list) - set(erecurse_list)) - - erecurse_kwargs = self.__recursion_args(pkg_op_erecurse, - refresh_catalogs, update_index, api_kwargs) - pd.child_op_vectors.append((pkg_op_erecurse, - list(erecurse_list), erecurse_kwargs, True)) - - # Prepare op vector for implicit recurse operations - irecurse_kwargs = self.__recursion_args(pkg_op_irecurse, - refresh_catalogs, update_index, api_kwargs) - - pd.child_op_vectors.append((pkg_op_irecurse, lin_list, - irecurse_kwargs, ignore_syncmd_nop)) - - pd.children_ignored = self.__lic_ignore - - # recurse into children - for p_dict in self.__api_recurse(pkgdefs.API_STAGE_PLAN, - progtrack): - yield p_dict - - def api_recurse_prepare(self, progtrack): - """Prepare child image updates.""" - progtrack.set_major_phase(progtrack.PHASE_DOWNLOAD) - list(self.__api_recurse(pkgdefs.API_STAGE_PREPARE, progtrack)) - - def api_recurse_execute(self, progtrack): - """Execute child image updates.""" - progtrack.set_major_phase(progtrack.PHASE_FINALIZE) - list(self.__api_recurse(pkgdefs.API_STAGE_EXECUTE, progtrack)) - - def init_plan(self, pd): - 
"""Initialize our state in the PlanDescription.""" - - # if we're a child, save our parent package state into the - # plan description - pd.li_props = rm_dict_ent(self.__props.copy(), temporal_props) - pd.li_ppkgs = self.__ppkgs - pd.li_ppubs = self.__ppubs - pd.li_pfacets = self.__pfacets - - def setup_plan(self, pd): - """Reload a previously created plan.""" - - # make a copy of the linked image properties - props = pd.li_props.copy() - - # generate temporal properties - if props: - self.__set_current_path(props) - - # load linked image state from the plan - self.__update_props(props) - self.__ppubs = pd.li_ppubs - self.__ppkgs = pd.li_ppkgs - self.__pfacets = pd.li_pfacets - - # now initialize our recursion state, this involves allocating - # handles to operate on children. we don't need handles for - # children that were either ignored during planning, or which - # return EXIT_NOP after planning (since these children don't - # need any updates). - li_ignore = copy.copy(pd.children_ignored) - - # merge the children that returned nop into li_ignore (since - # we don't need to recurse into them). if li_ignore is [], - # then we ignored all children during planning - if li_ignore != [] and pd.children_nop: - if li_ignore is None: - # no children were ignored during planning - li_ignore = [] - li_ignore += pd.children_nop - - # Initialize children - self.__recursion_init(li_ignore=li_ignore) - - def recurse_nothingtodo(self): - """Return True if there is no planned work to do on child - image.""" - - for lic in six.itervalues(self.__lic_dict): - if lic.child_name not in \ - self.__img.imageplan.pd.children_nop: - return False - return True + done = lic_num - len(lic_setup) - len(lic_running) + lin_running = sorted([lic.child_name for lic in lic_running]) + _progtrack.li_recurse_status(lin_running, done) + + # poll on all the linked image children and see which + # ones have pending output. + fd_hash = dict([(lic.fileno(), lic) for lic in lic_running]) + p = select.poll() + for fd in fd_hash.keys(): + p.register(fd, select.POLLIN) + events = p.poll() + lic_list = [fd_hash[event[0]] for event in events] + + for lic in lic_list: + _progtrack.li_recurse_progress(lic.child_name) + if not lic.child_op_is_done(): + continue + # a child finished processing + progtrack_update = True + for p_dict in __child_op_finish( + lic, + lic_running, + _rvdict, + _progtrack, + _failfast, + _expect_plan, + ): + yield p_dict + + _li_rvdict_check(_rvdict) + if lic_num: + _progtrack.li_recurse_end() + + def __children_init(self, lin_list=None, li_ignore=None, failfast=True): + """Initialize LinkedImageChild objects for children specified + in 'lin_list'. 
If 'lin_list' is not specified, then + initialize objects for all children (excluding any being + ignored via 'li_ignore').""" + + # you can't specify children to operate on and children to be + # ignored at the same time + assert lin_list is None or li_ignore is None + + # if no children we listed, build a list of children + if lin_list is None: + lin_list = [i[0] for i in self.__list_children(li_ignore)] + else: + self.verify_names(lin_list) + + rvdict = {} + lic_dict = {} + for lin in lin_list: + try: + lic = LinkedImageChild(self, lin) + lic_dict[lin] = lic + except apx.LinkedImageException as e: + rvdict[lin] = LI_RVTuple(e.lix_exitrv, e, None) + + if failfast: + _li_rvdict_raise_exceptions(rvdict) + return lic_dict + + return (lic_dict, rvdict) + + def __recursion_init(self, li_ignore): + """Initialize child objects used during recursive packaging + operations.""" + + self.__lic_ignore = li_ignore + self.__lic_dict = self.__children_init(li_ignore=li_ignore) + + def api_recurse_init(self, li_ignore=None, repos=None): + """Initialize planning state. If we're a child image we save + our current state (which may reflect a planned state that we + have not committed to disk) into the plan. We also initialize + all our children to prepare to recurse into them.""" + + if PROP_RECURSE in self.__props and not self.__props[PROP_RECURSE]: + # we don't want to recurse + self.__recursion_init(li_ignore=[]) + return + + # Initialize children + self.__recursion_init(li_ignore) + + if not self.__lic_dict: + # we don't need to recurse + return + + # if we have any children we don't support operations using + # temporary repositories. + if repos: + raise apx.PlanCreationException(no_tmp_origins=True) + + def api_recurse_pubcheck(self, progtrack): + """Do a recursive publisher check""" + + # get a list of of children to recurse into. + lic_list = list(self.__lic_dict.values()) + + # do a publisher check on all of them + rvdict = {} + list( + self.__children_op( + _pkg_op=pkgdefs.PKG_OP_PUBCHECK, + _lic_list=lic_list, + _rvdict=rvdict, + _progtrack=progtrack, + _failfast=False, + ) + ) + + # raise an exception if one or more children failed the + # publisher check. + _li_rvdict_raise_exceptions(rvdict) + + def api_recurse_hfo_cleanup(self, progtrack): + """Do a recursive hot-fix origin cleanup""" + + # get a list of of children to recurse into. + lic_list = list(self.__lic_dict.values()) + + rvdict = {} + list( + self.__children_op( + _pkg_op=pkgdefs.PKG_OP_HOTFIX_CLEANUP, + _lic_list=lic_list, + _rvdict=rvdict, + _progtrack=progtrack, + _failfast=False, + ) + ) + + # raise an exception if one or more children failed + _li_rvdict_raise_exceptions(rvdict) + + def __api_recurse(self, stage, progtrack): + """This is an iterator function. It recurses into linked + image children to perform the specified operation. + """ + # get a pointer to the current image plan + pd = self.__img.imageplan.pd -class LinkedImageChild(object): - """A LinkedImageChild object is used when a parent image wants to - access a child image. 
These accesses may include things like: - saving/pushing linked image metadata into a child image, syncing or - auditing a child image, or recursing into a child image to keep it in - sync with planned changes in the parent image.""" - - def __init__(self, li, lin): - assert isinstance(li, LinkedImage), \ - "isinstance({0}, LinkedImage)".format(type(li)) - assert isinstance(lin, LinkedImageName), \ - "isinstance({0}, LinkedImageName)".format(type(lin)) - - # globals - self.__linked = li - self.__img = li.image - - # cache properties. - self.__props = self.__linked.child_props(lin) - assert self.__props[PROP_NAME] == lin + # get a list of of children to recurse into. + lic_list = list(self.__lic_dict.values()) - try: - imgdir = ar.ar_img_prefix(self.child_path) - except OSError as e: - raise apx.LinkedImageException(lin=lin, - child_op_failed=("find", self.child_path, e)) - - if not imgdir: - raise apx.LinkedImageException( - lin=lin, child_bad_img=self.child_path) - - # initialize paths for linked image data files - self.__path_ppkgs = os.path.join(imgdir, PATH_PPKGS) - self.__path_prop = os.path.join(imgdir, PATH_PROP) - self.__path_ppubs = os.path.join(imgdir, PATH_PUBS) - self.__path_pfacets = os.path.join(imgdir, PATH_PFACETS) - - # initialize a linked image child plugin - self.__plugin = \ - pkg.client.linkedimage.p_classes_child[lin.lin_type](self) - - self.__pkg_remote = pkg.client.pkgremote.PkgRemote() - self.__child_op_rvtuple = None - self.__child_op = None - - @property - def child_name(self): - """Get the name associated with a child image.""" - return self.__props[PROP_NAME] - - @property - def child_path(self): - """Get the path associated with a child image.""" - - if self.__linked.inaltroot(): - return self.__props[PROP_CURRENT_PATH] - return self.__props[PROP_PATH] - - @property - def child_pimage(self): - """Get a pointer to the parent image object associated with - this child.""" - return self.__img - - def __push_data(self, root, path, data, tmp, test): - """Write data to a child image.""" + # sanity check stage + assert stage in [ + pkgdefs.API_STAGE_PLAN, + pkgdefs.API_STAGE_PREPARE, + pkgdefs.API_STAGE_EXECUTE, + ] - try: - # first save our data to a temporary file - path_tmp = "{0}.{1}".format(path, - global_settings.client_runid) - save_data(path_tmp, data, root=root, - catch_exception=False) - - # Check if the data is changing. To do this - # comparison we load the serialized on-disk json data - # into memory because there are no guarantees about - # data ordering during serialization. When loading - # the data we don't bother decoding it into objects. - updated = True - old_data = load_data(path, missing_ok=True, - root=root, decode=False, - catch_exception=False) - if old_data is not None: - new_data = load_data(path_tmp, - root=root, decode=False, - catch_exception=False) - # We regard every combination of the same - # elements in a list being the same data, for - # example, ["a", "b"] equals ["b", "a"], so we - # need to sort the list first before comparison - # because ["a", "b"] != ["b", "a"] in Python. - if isinstance(old_data, list) and \ - isinstance(new_data, list): - old_data = sorted(old_data) - new_data = sorted(new_data) - if old_data == new_data: - updated = False - - - # If we're not actually updating any data, or if we - # were just doing a test to see if the data has - # changed, then delete the temporary data file. 
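__push_data() decides whether child metadata really changed by comparing the decoded JSON rather than the raw bytes, and treats lists as order-insensitive, exactly as the comment explains. A small sketch of that comparison (the helper name and sample payloads are illustrative):

import json

def data_changed(old_raw, new_raw):
    """True if the serialized metadata differs, ignoring list order."""
    if old_raw is None:
        return True
    old, new = json.loads(old_raw), json.loads(new_raw)
    if isinstance(old, list) and isinstance(new, list):
        old, new = sorted(old), sorted(new)
    return old != new

print(data_changed('["a", "b"]', '["b", "a"]'))   # False - same data
print(data_changed('["a"]', '["a", "b"]'))        # True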
- if not updated or test: - ar.ar_unlink(root, path_tmp) - return updated - - if not tmp: - ar.ar_rename(root, path_tmp, path) - - except OSError as e: - raise apx.LinkedImageException(lin=self.child_name, - child_op_failed=("metadata update", - self.child_path, e)) + # if we're ignoring all children then we can't be recursing + assert pd.children_ignored != [] or lic_list == [] - return True + # sanity check the plan description state + if stage == pkgdefs.API_STAGE_PLAN: + # the state should be uninitialized + assert pd.children_planned == [] + assert pd.children_nop == [] + else: + # if we ignored all children, we better not have + # recursed into any children. + assert ( + pd.children_ignored != [] + or pd.children_planned == pd.children_nop == [] + ) + + # there shouldn't be any overlap between sets of + # children in the plan + assert not (set(pd.children_planned) & set(pd.children_nop)) + if pd.children_ignored: + assert not (set(pd.children_ignored) & set(pd.children_planned)) + assert not (set(pd.children_ignored) & set(pd.children_nop)) + + # make sure set of child handles matches the set of + # previously planned children. + assert set(self.__lic_dict) == set(pd.children_planned) + + # if we're in the planning stage, we should pass the current + # image plan onto the child and also expect an image plan from + # the child. + expect_plan = False + if stage == pkgdefs.API_STAGE_PLAN: + expect_plan = True + + # Assemble list of LICs from LINs in pd.child_op_vectors and + # create new lic_op_vectors to pass to __children_op_vec(). + lic_op_vectors = [] + for op, lin_list, kwargs, ignore_syncmd_nop in pd.child_op_vectors: + assert "stage" not in kwargs + lic_list = [] + for l in lin_list: + try: + lic_list.append(self.__lic_dict[l]) + except KeyError: + # For the prepare and execute phase we + # remove children for which there is + # nothing to do from self.__lic_dict. + # So ignore those we can't find. + pass + lic_op_vectors.append((op, lic_list, kwargs, ignore_syncmd_nop)) + + rvdict = {} + for p_dict in self.__children_op_vec( + _lic_op_vectors=lic_op_vectors, + _rvdict=rvdict, + _progtrack=progtrack, + _failfast=True, + _expect_plan=expect_plan, + stage=stage, + _pd=pd, + ): + yield p_dict + + assert not _li_rvdict_exceptions(rvdict) + + for lin in rvdict: + # check for children that don't need any updates + if rvdict[lin].rvt_rv == pkgdefs.EXIT_NOP: + assert lin not in pd.children_nop + pd.children_nop.append(lin) + del self.__lic_dict[lin] + + # record the children that are done planning + if ( + stage == pkgdefs.API_STAGE_PLAN + and rvdict[lin].rvt_rv == pkgdefs.EXIT_OK + ): + assert lin not in pd.children_planned + pd.children_planned.append(lin) + + @staticmethod + def __recursion_ops(api_op): + """Determine what pkg command to use when recursing into child + images.""" - def __push_ppkgs(self, ppkgs, tmp=False, test=False): - """Sync linked image parent constraint data to a child image. + # + # given the api operation being performed on the current + # image, figure out what api operation should be performed on + # child images. + # + # the recursion policy which hard coded here is that if we do + # an pkg update in the parent image without any packages + # specified (ie, we want to update everything) then when we + # recurse we'll also do an update of everything. but if we're + # doing any other operation like install, uninstall, an update + # of specific packages, etc, then when we recurse we'll do a + # sync in the child. 
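__recursion_ops() encodes the policy spelled out in the comment above: it maps the parent operation to an implicit and an explicit child operation, where implicitly recursed children always get a sync and explicitly recursed children get the matching operation. Expressed as a lookup table with illustrative operation names rather than the pkgdefs constants:

RECURSE_MAP = {
    # parent op -> (implicit child op, explicit child op)
    "sync":           ("sync", None),
    "install":        ("sync", "install"),
    "uninstall":      ("sync", "uninstall"),
    "change-facet":   ("sync", "change-facet"),
    "change-variant": ("sync", "change-variant"),
    "update":         ("sync", "update"),
}

def child_ops(parent_op):
    # anything unlisted (e.g. verify) falls back to a plain sync
    return RECURSE_MAP.get(parent_op, ("sync", None))

assert child_ops("install") == ("sync", "install")
assert child_ops("verify") == ("sync", None)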
+ # - 'tmp' determines if we should read/write to the official - linked image metadata files, or if we should access temporary - versions (which have "." appended to them.""" + # To improve performance we assume the child is already in sync, + # so if its linked image metadata isn't changing then the child + # won't need any updates so there will be no need to recurse + # into it. + ignore_syncmd_nop = False + pkg_op_erecurse = None + + if api_op == pkgdefs.API_OP_SYNC: + pkg_op_irecurse = pkgdefs.PKG_OP_SYNC + # If we are doing an explicit sync, we do have to make + # sure we actually recurse into the child and sync + # metadata. + ignore_syncmd_nop = True + elif api_op == pkgdefs.API_OP_INSTALL: + pkg_op_irecurse = pkgdefs.PKG_OP_SYNC + pkg_op_erecurse = pkgdefs.PKG_OP_INSTALL + elif api_op == pkgdefs.API_OP_CHANGE_FACET: + pkg_op_irecurse = pkgdefs.PKG_OP_SYNC + pkg_op_erecurse = pkgdefs.PKG_OP_CHANGE_FACET + elif api_op == pkgdefs.API_OP_CHANGE_VARIANT: + pkg_op_irecurse = pkgdefs.PKG_OP_SYNC + pkg_op_erecurse = pkgdefs.PKG_OP_CHANGE_VARIANT + if api_op == pkgdefs.API_OP_UPDATE: + pkg_op_irecurse = pkgdefs.PKG_OP_SYNC + pkg_op_erecurse = pkgdefs.PKG_OP_UPDATE + elif api_op == pkgdefs.API_OP_UNINSTALL: + pkg_op_irecurse = pkgdefs.PKG_OP_SYNC + pkg_op_erecurse = pkgdefs.PKG_OP_UNINSTALL + else: + pkg_op_irecurse = pkgdefs.PKG_OP_SYNC - # save the planned parent packages - return self.__push_data(self.child_path, self.__path_ppkgs, - ppkgs, tmp, test) + return pkg_op_irecurse, pkg_op_erecurse, ignore_syncmd_nop - def __push_pfacets(self, pfacets, tmp=False, test=False): - """Sync linked image parent facet data to a child image. + @staticmethod + def __recursion_args(op, refresh_catalogs, update_index, api_kwargs): + """Determine what pkg command arguments to use when recursing + into child images.""" - 'tmp' determines if we should read/write to the official - linked image metadata files, or if we should access temporary - versions (which have "." appended to them.""" + kwargs = {} + kwargs["noexecute"] = api_kwargs["noexecute"] + kwargs["refresh_catalogs"] = refresh_catalogs + kwargs["show_licenses"] = False + kwargs["update_index"] = update_index - # save the planned parent facets - return self.__push_data(self.child_path, self.__path_pfacets, - pfacets, tmp, test) + # + # when we recurse we always accept all new licenses (for now). + # + # ultimately (when start yielding back plan descriptions for + # children) in addition to accepting licenses on the plan for + # the current image the api client will also have to + # explicitly accept licenses for all child images. but until + # that happens we'll just assume that the parent image license + # space is a superset of the child image license space (and + # since the api consumer must accept licenses in the parent + # before we'll do anything, we'll assume licenses in the child + # are accepted as well). 
+ # + kwargs["accept"] = True + + if "li_pkg_updates" in api_kwargs: + # option specific to: attach, set-property-linked, sync + kwargs["li_pkg_updates"] = api_kwargs["li_pkg_updates"] + + if op == pkgdefs.PKG_OP_INSTALL: + assert "pkgs_inst" in api_kwargs + # option specific to: install + kwargs["pkgs_inst"] = api_kwargs["pkgs_inst"] + kwargs["reject_list"] = api_kwargs["reject_list"] + elif op == pkgdefs.PKG_OP_CHANGE_VARIANT: + assert "variants" in api_kwargs + # option specific to: change-variant + kwargs["variants"] = api_kwargs["variants"] + kwargs["facets"] = None + kwargs["reject_list"] = api_kwargs["reject_list"] + elif op == pkgdefs.PKG_OP_CHANGE_FACET: + assert "facets" in api_kwargs + # option specific to: change-facet + kwargs["facets"] = api_kwargs["facets"] + kwargs["variants"] = None + kwargs["reject_list"] = api_kwargs["reject_list"] + elif op == pkgdefs.PKG_OP_UNINSTALL: + assert "pkgs_to_uninstall" in api_kwargs + # option specific to: uninstall + kwargs["pkgs_to_uninstall"] = api_kwargs["pkgs_to_uninstall"] + del kwargs["show_licenses"] + del kwargs["refresh_catalogs"] + del kwargs["accept"] + elif op == pkgdefs.PKG_OP_UPDATE: + # skip ipkg up to date check for child images + kwargs["force"] = True + kwargs["pkgs_update"] = api_kwargs["pkgs_update"] + kwargs["reject_list"] = api_kwargs["reject_list"] + + return kwargs + + def api_recurse_plan( + self, + api_kwargs, + erecurse_list, + refresh_catalogs, + update_index, + progtrack, + ): + """Plan child image updates.""" + + pd = self.__img.imageplan.pd + api_op = pd.plan_type + + pd.child_op_vectors = [] + + # Get LinkedImageNames of all children + lin_list = list(self.__lic_dict.keys()) + + ( + pkg_op_irecurse, + pkg_op_erecurse, + ignore_syncmd_nop, + ) = self.__recursion_ops(api_op) + + # Prepare op vector for explicit recurse operations + if erecurse_list: + assert pkg_op_erecurse + # remove recurse children from sync list + lin_list = list(set(lin_list) - set(erecurse_list)) + + erecurse_kwargs = self.__recursion_args( + pkg_op_erecurse, refresh_catalogs, update_index, api_kwargs + ) + pd.child_op_vectors.append( + (pkg_op_erecurse, list(erecurse_list), erecurse_kwargs, True) + ) + + # Prepare op vector for implicit recurse operations + irecurse_kwargs = self.__recursion_args( + pkg_op_irecurse, refresh_catalogs, update_index, api_kwargs + ) + + pd.child_op_vectors.append( + (pkg_op_irecurse, lin_list, irecurse_kwargs, ignore_syncmd_nop) + ) + + pd.children_ignored = self.__lic_ignore + + # recurse into children + for p_dict in self.__api_recurse(pkgdefs.API_STAGE_PLAN, progtrack): + yield p_dict + + def api_recurse_prepare(self, progtrack): + """Prepare child image updates.""" + progtrack.set_major_phase(progtrack.PHASE_DOWNLOAD) + list(self.__api_recurse(pkgdefs.API_STAGE_PREPARE, progtrack)) + + def api_recurse_execute(self, progtrack): + """Execute child image updates.""" + progtrack.set_major_phase(progtrack.PHASE_FINALIZE) + list(self.__api_recurse(pkgdefs.API_STAGE_EXECUTE, progtrack)) + + def init_plan(self, pd): + """Initialize our state in the PlanDescription.""" + + # if we're a child, save our parent package state into the + # plan description + pd.li_props = rm_dict_ent(self.__props.copy(), temporal_props) + pd.li_ppkgs = self.__ppkgs + pd.li_ppubs = self.__ppubs + pd.li_pfacets = self.__pfacets + + def setup_plan(self, pd): + """Reload a previously created plan.""" + + # make a copy of the linked image properties + props = pd.li_props.copy() + + # generate temporal properties + if props: + 
self.__set_current_path(props) + + # load linked image state from the plan + self.__update_props(props) + self.__ppubs = pd.li_ppubs + self.__ppkgs = pd.li_ppkgs + self.__pfacets = pd.li_pfacets + + # now initialize our recursion state, this involves allocating + # handles to operate on children. we don't need handles for + # children that were either ignored during planning, or which + # return EXIT_NOP after planning (since these children don't + # need any updates). + li_ignore = copy.copy(pd.children_ignored) + + # merge the children that returned nop into li_ignore (since + # we don't need to recurse into them). if li_ignore is [], + # then we ignored all children during planning + if li_ignore != [] and pd.children_nop: + if li_ignore is None: + # no children were ignored during planning + li_ignore = [] + li_ignore += pd.children_nop + + # Initialize children + self.__recursion_init(li_ignore=li_ignore) + + def recurse_nothingtodo(self): + """Return True if there is no planned work to do on child + image.""" + + for lic in six.itervalues(self.__lic_dict): + if lic.child_name not in self.__img.imageplan.pd.children_nop: + return False + return True - def __push_props(self, tmp=False, test=False): - """Sync linked image properties data to a child image. +class LinkedImageChild(object): + """A LinkedImageChild object is used when a parent image wants to + access a child image. These accesses may include things like: + saving/pushing linked image metadata into a child image, syncing or + auditing a child image, or recursing into a child image to keep it in + sync with planned changes in the parent image.""" + + def __init__(self, li, lin): + assert isinstance( + li, LinkedImage + ), "isinstance({0}, LinkedImage)".format(type(li)) + assert isinstance( + lin, LinkedImageName + ), "isinstance({0}, LinkedImageName)".format(type(lin)) + + # globals + self.__linked = li + self.__img = li.image + + # cache properties. + self.__props = self.__linked.child_props(lin) + assert self.__props[PROP_NAME] == lin - 'tmp' determines if we should read/write to the official - linked image metadata files, or if we should access temporary - versions (which have "." 
appended to them.""" + try: + imgdir = ar.ar_img_prefix(self.child_path) + except OSError as e: + raise apx.LinkedImageException( + lin=lin, child_op_failed=("find", self.child_path, e) + ) + + if not imgdir: + raise apx.LinkedImageException( + lin=lin, child_bad_img=self.child_path + ) + + # initialize paths for linked image data files + self.__path_ppkgs = os.path.join(imgdir, PATH_PPKGS) + self.__path_prop = os.path.join(imgdir, PATH_PROP) + self.__path_ppubs = os.path.join(imgdir, PATH_PUBS) + self.__path_pfacets = os.path.join(imgdir, PATH_PFACETS) + + # initialize a linked image child plugin + self.__plugin = pkg.client.linkedimage.p_classes_child[lin.lin_type]( + self + ) + + self.__pkg_remote = pkg.client.pkgremote.PkgRemote() + self.__child_op_rvtuple = None + self.__child_op = None + + @property + def child_name(self): + """Get the name associated with a child image.""" + return self.__props[PROP_NAME] + + @property + def child_path(self): + """Get the path associated with a child image.""" + + if self.__linked.inaltroot(): + return self.__props[PROP_CURRENT_PATH] + return self.__props[PROP_PATH] + + @property + def child_pimage(self): + """Get a pointer to the parent image object associated with + this child.""" + return self.__img + + def __push_data(self, root, path, data, tmp, test): + """Write data to a child image.""" - # make a copy of the props we want to push - props = self.__props.copy() - assert PROP_PARENT_PATH not in props + try: + # first save our data to a temporary file + path_tmp = "{0}.{1}".format(path, global_settings.client_runid) + save_data(path_tmp, data, root=root, catch_exception=False) + + # Check if the data is changing. To do this + # comparison we load the serialized on-disk json data + # into memory because there are no guarantees about + # data ordering during serialization. When loading + # the data we don't bother decoding it into objects. + updated = True + old_data = load_data( + path, + missing_ok=True, + root=root, + decode=False, + catch_exception=False, + ) + if old_data is not None: + new_data = load_data( + path_tmp, root=root, decode=False, catch_exception=False + ) + # We regard every combination of the same + # elements in a list being the same data, for + # example, ["a", "b"] equals ["b", "a"], so we + # need to sort the list first before comparison + # because ["a", "b"] != ["b", "a"] in Python. + if isinstance(old_data, list) and isinstance(new_data, list): + old_data = sorted(old_data) + new_data = sorted(new_data) + if old_data == new_data: + updated = False + + # If we're not actually updating any data, or if we + # were just doing a test to see if the data has + # changed, then delete the temporary data file. + if not updated or test: + ar.ar_unlink(root, path_tmp) + return updated + + if not tmp: + ar.ar_rename(root, path_tmp, path) - self.__plugin.munge_props(props) + except OSError as e: + raise apx.LinkedImageException( + lin=self.child_name, + child_op_failed=("metadata update", self.child_path, e), + ) + + return True + + def __push_ppkgs(self, ppkgs, tmp=False, test=False): + """Sync linked image parent constraint data to a child image. + + 'tmp' determines if we should read/write to the official + linked image metadata files, or if we should access temporary + versions (which have "." 
appended to them.""" + + # save the planned parent packages + return self.__push_data( + self.child_path, self.__path_ppkgs, ppkgs, tmp, test + ) - # delete temporal properties - props = rm_dict_ent(props, temporal_props) - return self.__push_data(self.child_path, self.__path_prop, - props, tmp, test) + def __push_pfacets(self, pfacets, tmp=False, test=False): + """Sync linked image parent facet data to a child image. - def __push_ppubs(self, ppubs, tmp=False, test=False): - """Sync linked image parent publisher data to a child image. + 'tmp' determines if we should read/write to the official + linked image metadata files, or if we should access temporary + versions (which have "." appended to them.""" - 'tmp' determines if we should read/write to the official - linked image metadata files, or if we should access temporary - versions (which have "." appended to them.""" + # save the planned parent facets + return self.__push_data( + self.child_path, self.__path_pfacets, pfacets, tmp, test + ) - return self.__push_data(self.child_path, self.__path_ppubs, - ppubs, tmp, test) + def __push_props(self, tmp=False, test=False): + """Sync linked image properties data to a child image. - def __syncmd(self, pmd, tmp=False, test=False): - """Sync linked image data to a child image. + 'tmp' determines if we should read/write to the official + linked image metadata files, or if we should access temporary + versions (which have "." appended to them.""" - 'tmp' determines if we should read/write to the official - linked image metadata files, or if we should access temporary - versions (which have "." appended to them.""" + # make a copy of the props we want to push + props = self.__props.copy() + assert PROP_PARENT_PATH not in props - # unpack parent metadata tuple - ppubs, ppkgs, pfacets = pmd + self.__plugin.munge_props(props) - ppkgs_updated = self.__push_ppkgs(ppkgs, tmp, test) - props_updated = self.__push_props(tmp, test) - pubs_updated = self.__push_ppubs(ppubs, tmp, test) - pfacets_updated = self.__push_pfacets(pfacets, tmp, test) + # delete temporal properties + props = rm_dict_ent(props, temporal_props) + return self.__push_data( + self.child_path, self.__path_prop, props, tmp, test + ) - return (props_updated or ppkgs_updated or pubs_updated or - pfacets_updated) + def __push_ppubs(self, ppubs, tmp=False, test=False): + """Sync linked image parent publisher data to a child image. - def __child_op_setup_syncmd(self, pmd, ignore_syncmd_nop=True, - tmp=False, test=False, stage=pkgdefs.API_STAGE_DEFAULT): - """Prepare to perform an operation on a child image by syncing - the latest linked image data to that image. As part of this - operation, if we discover that the meta data hasn't changed we - may report back that there is nothing to do (EXIT_NOP). + 'tmp' determines if we should read/write to the official + linked image metadata files, or if we should access temporary + versions (which have "." appended to them.""" - 'pmd' is a tuple that contains parent metadata that we will - sync to the child image. Note this is not all the metadata - that we will sync, just the set which is common to all - children. + return self.__push_data( + self.child_path, self.__path_ppubs, ppubs, tmp, test + ) - 'ignore_syncmd_nop' a boolean that indicates if we should - always recurse into a child even if the linked image meta data - isn't changing. + def __syncmd(self, pmd, tmp=False, test=False): + """Sync linked image data to a child image. 
- 'tmp' a boolean that indicates if we should save the child - image meta data into temporary files (instead of overwriting - the persistent meta data files). + 'tmp' determines if we should read/write to the official + linked image metadata files, or if we should access temporary + versions (which have "." appended to them.""" - 'test' a boolean that indicates we shouldn't save any child - image meta data, instead we should just test to see if the - meta data is changing. + # unpack parent metadata tuple + ppubs, ppkgs, pfacets = pmd - 'stage' indicates which stage of execution we should be - performing on a child image.""" + ppkgs_updated = self.__push_ppkgs(ppkgs, tmp, test) + props_updated = self.__push_props(tmp, test) + pubs_updated = self.__push_ppubs(ppubs, tmp, test) + pfacets_updated = self.__push_pfacets(pfacets, tmp, test) - # we don't update metadata during all stages of operation - if stage not in [ - pkgdefs.API_STAGE_DEFAULT, pkgdefs.API_STAGE_PLAN]: - return True + return props_updated or ppkgs_updated or pubs_updated or pfacets_updated - try: - updated = self.__syncmd(pmd, tmp=tmp, test=test) - except apx.LinkedImageException as e: - self.__child_op_rvtuple = \ - LI_RVTuple(e.lix_exitrv, e, None) - return False + def __child_op_setup_syncmd( + self, + pmd, + ignore_syncmd_nop=True, + tmp=False, + test=False, + stage=pkgdefs.API_STAGE_DEFAULT, + ): + """Prepare to perform an operation on a child image by syncing + the latest linked image data to that image. As part of this + operation, if we discover that the meta data hasn't changed we + may report back that there is nothing to do (EXIT_NOP). - if ignore_syncmd_nop: - # we successfully updated the metadata - return True + 'pmd' is a tuple that contains parent metadata that we will + sync to the child image. Note this is not all the metadata + that we will sync, just the set which is common to all + children. - # if the metadata changed then report success - if updated: - return True + 'ignore_syncmd_nop' a boolean that indicates if we should + always recurse into a child even if the linked image meta data + isn't changing. - # the metadata didn't change, so this operation is a NOP - self.__child_op_rvtuple = \ - LI_RVTuple(pkgdefs.EXIT_NOP, None, None) - return False + 'tmp' a boolean that indicates if we should save the child + image meta data into temporary files (instead of overwriting + the persistent meta data files). - def __child_setup_sync(self, _pmd, _progtrack, _ignore_syncmd_nop, - _syncmd_tmp, - accept=False, - li_md_only=False, - li_pkg_updates=True, - noexecute=False, - refresh_catalogs=True, - reject_list=misc.EmptyI, - show_licenses=False, - stage=pkgdefs.API_STAGE_DEFAULT, - update_index=True): - """Prepare to sync a child image. This involves updating the - linked image metadata in the child and then possibly recursing - into the child to actually update packages. - - For descriptions of parameters please see the descriptions in - api.py`gen_plan_*""" - - if li_md_only: - # - # we're not going to recurse into the child image, - # we're just going to update its metadata. - # - # we don't support updating packages in the parent - # during attach metadata only sync. 
- # - if not self.__child_op_setup_syncmd(_pmd, - ignore_syncmd_nop=False, - test=noexecute, stage=stage): - # the update failed - return - self.__child_op_rvtuple = \ - LI_RVTuple(pkgdefs.EXIT_OK, None, None) - return - - # - # first sync the metadata - # - # if we're doing this sync as part of an attach, then - # temporarily sync the metadata since we don't know yet if the - # attach will succeed. if the attach doesn't succeed this - # means we don't have to delete any metadata. if the attach - # succeeds the child will make the temporary metadata - # permanent as part of the commit. - # - # we don't support updating packages in the parent - # during attach. - # - if not self.__child_op_setup_syncmd(_pmd, - ignore_syncmd_nop=_ignore_syncmd_nop, - tmp=_syncmd_tmp, stage=stage): - # the update failed or the metadata didn't change - return - - self.__pkg_remote.setup(self.child_path, - pkgdefs.PKG_OP_SYNC, - accept=accept, - backup_be=None, - backup_be_name=None, - be_activate=True, - be_name=None, - li_ignore=None, - li_md_only=li_md_only, - li_parent_sync=True, - li_pkg_updates=li_pkg_updates, - li_target_all=False, - li_target_list=[], - new_be=None, - noexecute=noexecute, - origins=[], - parsable_version=\ - global_settings.client_output_parsable_version, - quiet=global_settings.client_output_quiet, - refresh_catalogs=refresh_catalogs, - reject_pats=reject_list, - show_licenses=show_licenses, - stage=stage, - update_index=update_index, - verbose=global_settings.client_output_verbose) - - def __child_setup_update(self, _pmd, _progtrack, _syncmd_tmp, - accept, force, noexecute, pkgs_update, refresh_catalogs, - reject_list, show_licenses, stage, update_index): - """Prepare to update a child image.""" - - # first sync the metadata - if not self.__child_op_setup_syncmd(_pmd, - ignore_syncmd_nop=True, - tmp=_syncmd_tmp, stage=stage): - # the update failed or the metadata didn't change - return - - # We need to make sure we don't pass None as pargs in - # client.py`update() - if pkgs_update is None: - pkgs_update = [] - - self.__pkg_remote.setup(self.child_path, - pkgdefs.PKG_OP_UPDATE, - act_timeout=0, - accept=accept, - backup_be=None, - backup_be_name=None, - be_activate=True, - be_name=None, - force=force, - ignore_missing=True, - li_erecurse=None, - li_ignore=None, - li_parent_sync=True, - new_be=None, - noexecute=noexecute, - origins=[], - pargs=pkgs_update, - parsable_version=\ - global_settings.client_output_parsable_version, - quiet=global_settings.client_output_quiet, - refresh_catalogs=refresh_catalogs, - reject_pats=reject_list, - show_licenses=show_licenses, - stage=stage, - update_index=update_index, - verbose=global_settings.client_output_verbose) - - def __child_setup_install(self, _pmd, _progtrack, _syncmd_tmp, - accept, noexecute, pkgs_inst, refresh_catalogs, reject_list, - show_licenses, stage, update_index): - """Prepare to install a pkg in a child image.""" - - # first sync the metadata - if not self.__child_op_setup_syncmd(_pmd, - ignore_syncmd_nop=True, - tmp=_syncmd_tmp, stage=stage): - # the update failed or the metadata didn't change - return - - self.__pkg_remote.setup(self.child_path, - pkgdefs.PKG_OP_INSTALL, - accept=accept, - act_timeout=0, - backup_be=None, - backup_be_name=None, - be_activate=True, - be_name=None, - li_erecurse=None, - li_ignore=None, - li_parent_sync=True, - new_be=None, - noexecute=noexecute, - origins=[], - pargs=pkgs_inst, - parsable_version=\ - global_settings.client_output_parsable_version, - quiet=global_settings.client_output_quiet, - 
refresh_catalogs=refresh_catalogs, - reject_pats=reject_list, - show_licenses=show_licenses, - stage=stage, - update_index=update_index, - verbose=global_settings.client_output_verbose) - - def __child_setup_uninstall(self, _pmd, _progtrack, _syncmd_tmp, - noexecute, pkgs_to_uninstall, stage, update_index): - """Prepare to install a pkg in a child image.""" - - # first sync the metadata - if not self.__child_op_setup_syncmd(_pmd, - ignore_syncmd_nop=True, - tmp=_syncmd_tmp, stage=stage): - # the update failed or the metadata didn't change - return - - self.__pkg_remote.setup(self.child_path, - pkgdefs.PKG_OP_UNINSTALL, - act_timeout=0, - backup_be=None, - backup_be_name=None, - be_activate=True, - be_name=None, - li_erecurse=None, - li_ignore=None, - li_parent_sync=True, - new_be=None, - noexecute=noexecute, - pargs=pkgs_to_uninstall, - parsable_version=\ - global_settings.client_output_parsable_version, - quiet=global_settings.client_output_quiet, - stage=stage, - update_index=update_index, - ignore_missing=True, - verbose=global_settings.client_output_verbose) - - def __child_setup_change_varcets(self, _pmd, _progtrack, _syncmd_tmp, - accept, facets, noexecute, refresh_catalogs, reject_list, - show_licenses, stage, update_index, variants): - """Prepare to install a pkg in a child image.""" - - # first sync the metadata - if not self.__child_op_setup_syncmd(_pmd, - ignore_syncmd_nop=True, - tmp=_syncmd_tmp, stage=stage): - # the update failed or the metadata didn't change - return - - assert not (variants and facets) - if variants: - op = pkgdefs.PKG_OP_CHANGE_VARIANT - varcet_dict = variants - else: - op = pkgdefs.PKG_OP_CHANGE_FACET - varcet_dict = facets - - # need to transform varcets back to string list - varcets = [ "{0}={1}".format(a, b) for (a, b) in - varcet_dict.items()] - - self.__pkg_remote.setup(self.child_path, - op, - accept=accept, - act_timeout=0, - backup_be=None, - backup_be_name=None, - be_activate=True, - be_name=None, - li_erecurse=None, - li_ignore=None, - li_parent_sync=True, - new_be=None, - noexecute=noexecute, - origins=[], - pargs=varcets, - parsable_version=\ - global_settings.client_output_parsable_version, - quiet=global_settings.client_output_quiet, - refresh_catalogs=refresh_catalogs, - reject_pats=reject_list, - show_licenses=show_licenses, - stage=stage, - update_index=update_index, - verbose=global_settings.client_output_verbose) - - def __child_setup_detach(self, _progtrack, li_md_only=False, - li_pkg_updates=True, noexecute=False): - """Prepare to detach a child image.""" - - self.__pkg_remote.setup(self.child_path, - pkgdefs.PKG_OP_DETACH, - force=True, - li_md_only=li_md_only, - li_pkg_updates=li_pkg_updates, - li_target_all=False, - li_target_list=[], - noexecute=noexecute, - quiet=global_settings.client_output_quiet, - verbose=global_settings.client_output_verbose) - - def __child_setup_pubcheck(self, _pmd): - """Prepare to a check if a child's publishers are in sync.""" - - # first sync the metadata - # a pubcheck should never update persistent meta data - if not self.__child_op_setup_syncmd(_pmd, tmp=True): - # the update failed - return - - # setup recursion into the child image - self.__pkg_remote.setup(self.child_path, - pkgdefs.PKG_OP_PUBCHECK) - - def __child_setup_hfo_cleanup(self, _pmd): - """Prepare to a clean up any stale hotfix origins.""" - - # set up recursion into the child image - self.__pkg_remote.setup(self.child_path, - pkgdefs.PKG_OP_HOTFIX_CLEANUP) - - def __child_setup_audit(self, _pmd): - """Prepare to a child image to see 
if it's in sync with its - constraints.""" - - # first sync the metadata - if not self.__child_op_setup_syncmd(_pmd, tmp=True): - # the update failed - return - - # setup recursion into the child image - self.__pkg_remote.setup(self.child_path, - pkgdefs.PKG_OP_AUDIT_LINKED, - li_parent_sync=True, - li_target_all=False, - li_target_list=[], - omit_headers=True, - quiet=True) - - def child_op_abort(self): - """Public interface to abort an operation on a child image.""" - - self.__pkg_remote.abort() - self.__child_op_rvtuple = None - self.__child_op = None - - def child_op_setup(self, _pkg_op, _pmd, _progtrack, _ignore_syncmd_nop, - _syncmd_tmp, **kwargs): - """Public interface to setup an operation that we'd like to - perform on a child image.""" - - assert self.__child_op_rvtuple is None - assert self.__child_op is None - - self.__child_op = _pkg_op - - if _pkg_op == pkgdefs.PKG_OP_AUDIT_LINKED: - self.__child_setup_audit(_pmd, **kwargs) - elif _pkg_op == pkgdefs.PKG_OP_DETACH: - self.__child_setup_detach(_progtrack, **kwargs) - elif _pkg_op == pkgdefs.PKG_OP_PUBCHECK: - self.__child_setup_pubcheck(_pmd, **kwargs) - elif _pkg_op == pkgdefs.PKG_OP_HOTFIX_CLEANUP: - self.__child_setup_hfo_cleanup(_pmd, **kwargs) - elif _pkg_op == pkgdefs.PKG_OP_SYNC: - self.__child_setup_sync(_pmd, _progtrack, - _ignore_syncmd_nop, _syncmd_tmp, **kwargs) - elif _pkg_op == pkgdefs.PKG_OP_UPDATE: - self.__child_setup_update(_pmd, _progtrack, - _syncmd_tmp, **kwargs) - elif _pkg_op == pkgdefs.PKG_OP_INSTALL: - self.__child_setup_install(_pmd, _progtrack, - _syncmd_tmp, **kwargs) - elif _pkg_op == pkgdefs.PKG_OP_UNINSTALL: - self.__child_setup_uninstall(_pmd, _progtrack, - _syncmd_tmp, **kwargs) - elif _pkg_op == pkgdefs.PKG_OP_CHANGE_FACET or \ - _pkg_op == pkgdefs.PKG_OP_CHANGE_VARIANT: - self.__child_setup_change_varcets(_pmd, _progtrack, - _syncmd_tmp, **kwargs) - else: - raise RuntimeError( - "Unsupported package client op: {0}".format( - _pkg_op)) - - def child_op_start(self): - """Public interface to start an operation on a child image.""" - - # if we have a return value this operation is done - if self.__child_op_rvtuple is not None: - return True - - self.__pkg_remote.start() - - def child_op_is_done(self): - """Public interface to query if an operation on a child image - is done.""" - - # if we have a return value this operation is done - if self.__child_op_rvtuple is not None: - return True - - # make sure there is some data from the child - return self.__pkg_remote.is_done() - - def child_op_rv(self, expect_plan): - """Public interface to get the result of an operation on a - child image. - - 'expect_plan' boolean indicating if the child is performing a - planning operation. this is needed because if we're running - in parsable output mode then the child will emit a parsable - json version of the plan on stdout, and we'll verify it by - running it through the json parser. - """ - - # The child op is now done, so we reset __child_op to make sure - # we don't accidentally reuse the LIC without properly setting - # it up again. However, we still need the op type in this - # function so we make a copy. 
- pkg_op = self.__child_op - self.__child_op = None - - # if we have a return value this operation is done - if self.__child_op_rvtuple is not None: - rvtuple = self.__child_op_rvtuple - self.__child_op_rvtuple = None - return (rvtuple, None, None) - - # make sure we're not going to block - assert self.__pkg_remote.is_done() - - (rv, e, stdout, stderr) = self.__pkg_remote.result() - if e is not None: - rv = pkgdefs.EXIT_OOPS - - # if we got an exception, or a return value other than OK or - # NOP, then return an exception. - if e is not None or \ - rv not in [pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP]: - e = apx.LinkedImageException( - lin=self.child_name, exitrv=rv, - pkg_op_failed=(pkg_op, rv, stdout + stderr, e)) - rvtuple = LI_RVTuple(rv, e, None) - return (rvtuple, stdout, stderr) - - # check for NOP. - if rv == pkgdefs.EXIT_NOP: - assert e is None - rvtuple = LI_RVTuple(rv, None, None) - return (rvtuple, stdout, stderr) - - if global_settings.client_output_parsable_version is None or \ - not expect_plan: - rvtuple = LI_RVTuple(rv, None, None) - return (rvtuple, stdout, stderr) - - # If a plan was created and we're in parsable output mode then - # parse the plan that should have been displayed to stdout. - p_dict = None - try: - p_dict = json.loads(stdout) - except ValueError as e: - # JSON raises a subclass of ValueError when it - # can't parse a string. - - e = apx.LinkedImageException( - lin=self.child_name, - unparsable_output=(pkg_op, stdout + stderr, e)) - rvtuple = LI_RVTuple(rv, e, None) - return (rvtuple, stdout, stderr) - - p_dict["image-name"] = str(self.child_name) - rvtuple = LI_RVTuple(rv, None, p_dict) - return (rvtuple, stdout, stderr) - - def fileno(self): - """Return the progress pipe associated with the PkgRemote - instance that is operating on a child image.""" - return self.__pkg_remote.fileno() - - def child_init_root(self): - """Our image path is being updated, so figure out our new - child image paths. This interface only gets invoked when: - - - We're doing a packaging operation on a parent image and - we've just cloned that parent to create a new BE that we're - going to update. This clone also cloned all the children - and so now we need to update our paths to point to the newly - created children. - - - We tried to update a cloned image (as described above) and - our update failed, hence we're changing paths back to the - original images that were the source of the clone.""" - - # PROP_PARENT_PATH better not be present because - # LinkedImageChild objects are only used with push child - # images. - assert PROP_PARENT_PATH not in self.__props - - # Remove any path transform and reapply. - self.__props = rm_dict_ent(self.__props, temporal_props) - self.__linked.set_path_transform(self.__props, - self.__linked.get_path_transform(), - path=self.__props[PROP_PATH]) + 'test' a boolean that indicates we shouldn't save any child + image meta data, instead we should just test to see if the + meta data is changing. 
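The reformatted `__push_data` above, together with the 'tmp' and 'test' flags documented in `__child_op_setup_syncmd`, follows a write-compare-promote pattern: serialize to a temporary file, compare against what is already on disk, and only rename the temporary file into place when something changed and this is a real (non-tmp, non-test) update. A self-contained sketch of that pattern, using plain os/json calls rather than the ar_* alternate-root helpers and a made-up run id:

import json
import os

def push_data(path, data, tmp=False, test=False, runid="1234"):
    """Write 'data' next to 'path', report whether it changed, and
    only promote it to 'path' when not in tmp/test mode."""
    path_tmp = "{0}.{1}".format(path, runid)
    with open(path_tmp, "w") as f:
        json.dump(data, f)

    updated = True
    if os.path.exists(path):
        with open(path) as f_old, open(path_tmp) as f_new:
            old, new = json.load(f_old), json.load(f_new)
        # order-insensitive comparison for simple lists
        if isinstance(old, list) and isinstance(new, list):
            old, new = sorted(old), sorted(new)
        updated = old != new

    if not updated or test:
        os.unlink(path_tmp)        # nothing to change, or a dry run
        return updated
    if not tmp:
        os.rename(path_tmp, path)  # atomically replace the real file
    return True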
+ + 'stage' indicates which stage of execution we should be + performing on a child image.""" + + # we don't update metadata during all stages of operation + if stage not in [pkgdefs.API_STAGE_DEFAULT, pkgdefs.API_STAGE_PLAN]: + return True + try: + updated = self.__syncmd(pmd, tmp=tmp, test=test) + except apx.LinkedImageException as e: + self.__child_op_rvtuple = LI_RVTuple(e.lix_exitrv, e, None) + return False -# --------------------------------------------------------------------------- -# Interfaces to obtain linked image metadata from an image -# -def get_pubs(img): - """Return publisher information for the specified image. + if ignore_syncmd_nop: + # we successfully updated the metadata + return True - Publisher information is returned in a sorted list of lists - of the format: - , + # if the metadata changed then report success + if updated: + return True - Where: - is a string - is a boolean + # the metadata didn't change, so this operation is a NOP + self.__child_op_rvtuple = LI_RVTuple(pkgdefs.EXIT_NOP, None, None) + return False - The tuples are sorted by publisher rank. - """ + def __child_setup_sync( + self, + _pmd, + _progtrack, + _ignore_syncmd_nop, + _syncmd_tmp, + accept=False, + li_md_only=False, + li_pkg_updates=True, + noexecute=False, + refresh_catalogs=True, + reject_list=misc.EmptyI, + show_licenses=False, + stage=pkgdefs.API_STAGE_DEFAULT, + update_index=True, + ): + """Prepare to sync a child image. This involves updating the + linked image metadata in the child and then possibly recursing + into the child to actually update packages. + + For descriptions of parameters please see the descriptions in + api.py`gen_plan_*""" + + if li_md_only: + # + # we're not going to recurse into the child image, + # we're just going to update its metadata. + # + # we don't support updating packages in the parent + # during attach metadata only sync. + # + if not self.__child_op_setup_syncmd( + _pmd, ignore_syncmd_nop=False, test=noexecute, stage=stage + ): + # the update failed + return + self.__child_op_rvtuple = LI_RVTuple(pkgdefs.EXIT_OK, None, None) + return - return [ - [str(p), p.sticky] - for p in img.get_sorted_publishers(inc_disabled=False) - if not p.nochild - ] + # + # first sync the metadata + # + # if we're doing this sync as part of an attach, then + # temporarily sync the metadata since we don't know yet if the + # attach will succeed. if the attach doesn't succeed this + # means we don't have to delete any metadata. if the attach + # succeeds the child will make the temporary metadata + # permanent as part of the commit. + # + # we don't support updating packages in the parent + # during attach. 
+ # + if not self.__child_op_setup_syncmd( + _pmd, + ignore_syncmd_nop=_ignore_syncmd_nop, + tmp=_syncmd_tmp, + stage=stage, + ): + # the update failed or the metadata didn't change + return + + self.__pkg_remote.setup( + self.child_path, + pkgdefs.PKG_OP_SYNC, + accept=accept, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + li_ignore=None, + li_md_only=li_md_only, + li_parent_sync=True, + li_pkg_updates=li_pkg_updates, + li_target_all=False, + li_target_list=[], + new_be=None, + noexecute=noexecute, + origins=[], + parsable_version=global_settings.client_output_parsable_version, + quiet=global_settings.client_output_quiet, + refresh_catalogs=refresh_catalogs, + reject_pats=reject_list, + show_licenses=show_licenses, + stage=stage, + update_index=update_index, + verbose=global_settings.client_output_verbose, + ) + + def __child_setup_update( + self, + _pmd, + _progtrack, + _syncmd_tmp, + accept, + force, + noexecute, + pkgs_update, + refresh_catalogs, + reject_list, + show_licenses, + stage, + update_index, + ): + """Prepare to update a child image.""" + + # first sync the metadata + if not self.__child_op_setup_syncmd( + _pmd, ignore_syncmd_nop=True, tmp=_syncmd_tmp, stage=stage + ): + # the update failed or the metadata didn't change + return + + # We need to make sure we don't pass None as pargs in + # client.py`update() + if pkgs_update is None: + pkgs_update = [] + + self.__pkg_remote.setup( + self.child_path, + pkgdefs.PKG_OP_UPDATE, + act_timeout=0, + accept=accept, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + force=force, + ignore_missing=True, + li_erecurse=None, + li_ignore=None, + li_parent_sync=True, + new_be=None, + noexecute=noexecute, + origins=[], + pargs=pkgs_update, + parsable_version=global_settings.client_output_parsable_version, + quiet=global_settings.client_output_quiet, + refresh_catalogs=refresh_catalogs, + reject_pats=reject_list, + show_licenses=show_licenses, + stage=stage, + update_index=update_index, + verbose=global_settings.client_output_verbose, + ) + + def __child_setup_install( + self, + _pmd, + _progtrack, + _syncmd_tmp, + accept, + noexecute, + pkgs_inst, + refresh_catalogs, + reject_list, + show_licenses, + stage, + update_index, + ): + """Prepare to install a pkg in a child image.""" + + # first sync the metadata + if not self.__child_op_setup_syncmd( + _pmd, ignore_syncmd_nop=True, tmp=_syncmd_tmp, stage=stage + ): + # the update failed or the metadata didn't change + return + + self.__pkg_remote.setup( + self.child_path, + pkgdefs.PKG_OP_INSTALL, + accept=accept, + act_timeout=0, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + li_erecurse=None, + li_ignore=None, + li_parent_sync=True, + new_be=None, + noexecute=noexecute, + origins=[], + pargs=pkgs_inst, + parsable_version=global_settings.client_output_parsable_version, + quiet=global_settings.client_output_quiet, + refresh_catalogs=refresh_catalogs, + reject_pats=reject_list, + show_licenses=show_licenses, + stage=stage, + update_index=update_index, + verbose=global_settings.client_output_verbose, + ) + + def __child_setup_uninstall( + self, + _pmd, + _progtrack, + _syncmd_tmp, + noexecute, + pkgs_to_uninstall, + stage, + update_index, + ): + """Prepare to install a pkg in a child image.""" + + # first sync the metadata + if not self.__child_op_setup_syncmd( + _pmd, ignore_syncmd_nop=True, tmp=_syncmd_tmp, stage=stage + ): + # the update failed or the metadata didn't change + return + + 
self.__pkg_remote.setup( + self.child_path, + pkgdefs.PKG_OP_UNINSTALL, + act_timeout=0, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + li_erecurse=None, + li_ignore=None, + li_parent_sync=True, + new_be=None, + noexecute=noexecute, + pargs=pkgs_to_uninstall, + parsable_version=global_settings.client_output_parsable_version, + quiet=global_settings.client_output_quiet, + stage=stage, + update_index=update_index, + ignore_missing=True, + verbose=global_settings.client_output_verbose, + ) + + def __child_setup_change_varcets( + self, + _pmd, + _progtrack, + _syncmd_tmp, + accept, + facets, + noexecute, + refresh_catalogs, + reject_list, + show_licenses, + stage, + update_index, + variants, + ): + """Prepare to install a pkg in a child image.""" + + # first sync the metadata + if not self.__child_op_setup_syncmd( + _pmd, ignore_syncmd_nop=True, tmp=_syncmd_tmp, stage=stage + ): + # the update failed or the metadata didn't change + return + + assert not (variants and facets) + if variants: + op = pkgdefs.PKG_OP_CHANGE_VARIANT + varcet_dict = variants + else: + op = pkgdefs.PKG_OP_CHANGE_FACET + varcet_dict = facets + + # need to transform varcets back to string list + varcets = ["{0}={1}".format(a, b) for (a, b) in varcet_dict.items()] + + self.__pkg_remote.setup( + self.child_path, + op, + accept=accept, + act_timeout=0, + backup_be=None, + backup_be_name=None, + be_activate=True, + be_name=None, + li_erecurse=None, + li_ignore=None, + li_parent_sync=True, + new_be=None, + noexecute=noexecute, + origins=[], + pargs=varcets, + parsable_version=global_settings.client_output_parsable_version, + quiet=global_settings.client_output_quiet, + refresh_catalogs=refresh_catalogs, + reject_pats=reject_list, + show_licenses=show_licenses, + stage=stage, + update_index=update_index, + verbose=global_settings.client_output_verbose, + ) + + def __child_setup_detach( + self, _progtrack, li_md_only=False, li_pkg_updates=True, noexecute=False + ): + """Prepare to detach a child image.""" + + self.__pkg_remote.setup( + self.child_path, + pkgdefs.PKG_OP_DETACH, + force=True, + li_md_only=li_md_only, + li_pkg_updates=li_pkg_updates, + li_target_all=False, + li_target_list=[], + noexecute=noexecute, + quiet=global_settings.client_output_quiet, + verbose=global_settings.client_output_verbose, + ) + + def __child_setup_pubcheck(self, _pmd): + """Prepare to a check if a child's publishers are in sync.""" + + # first sync the metadata + # a pubcheck should never update persistent meta data + if not self.__child_op_setup_syncmd(_pmd, tmp=True): + # the update failed + return + + # setup recursion into the child image + self.__pkg_remote.setup(self.child_path, pkgdefs.PKG_OP_PUBCHECK) + + def __child_setup_hfo_cleanup(self, _pmd): + """Prepare to a clean up any stale hotfix origins.""" + + # set up recursion into the child image + self.__pkg_remote.setup(self.child_path, pkgdefs.PKG_OP_HOTFIX_CLEANUP) + + def __child_setup_audit(self, _pmd): + """Prepare to a child image to see if it's in sync with its + constraints.""" + + # first sync the metadata + if not self.__child_op_setup_syncmd(_pmd, tmp=True): + # the update failed + return + + # setup recursion into the child image + self.__pkg_remote.setup( + self.child_path, + pkgdefs.PKG_OP_AUDIT_LINKED, + li_parent_sync=True, + li_target_all=False, + li_target_list=[], + omit_headers=True, + quiet=True, + ) + + def child_op_abort(self): + """Public interface to abort an operation on a child image.""" + + self.__pkg_remote.abort() + 
self.__child_op_rvtuple = None + self.__child_op = None + + def child_op_setup( + self, + _pkg_op, + _pmd, + _progtrack, + _ignore_syncmd_nop, + _syncmd_tmp, + **kwargs, + ): + """Public interface to setup an operation that we'd like to + perform on a child image.""" + + assert self.__child_op_rvtuple is None + assert self.__child_op is None + + self.__child_op = _pkg_op + + if _pkg_op == pkgdefs.PKG_OP_AUDIT_LINKED: + self.__child_setup_audit(_pmd, **kwargs) + elif _pkg_op == pkgdefs.PKG_OP_DETACH: + self.__child_setup_detach(_progtrack, **kwargs) + elif _pkg_op == pkgdefs.PKG_OP_PUBCHECK: + self.__child_setup_pubcheck(_pmd, **kwargs) + elif _pkg_op == pkgdefs.PKG_OP_HOTFIX_CLEANUP: + self.__child_setup_hfo_cleanup(_pmd, **kwargs) + elif _pkg_op == pkgdefs.PKG_OP_SYNC: + self.__child_setup_sync( + _pmd, _progtrack, _ignore_syncmd_nop, _syncmd_tmp, **kwargs + ) + elif _pkg_op == pkgdefs.PKG_OP_UPDATE: + self.__child_setup_update(_pmd, _progtrack, _syncmd_tmp, **kwargs) + elif _pkg_op == pkgdefs.PKG_OP_INSTALL: + self.__child_setup_install(_pmd, _progtrack, _syncmd_tmp, **kwargs) + elif _pkg_op == pkgdefs.PKG_OP_UNINSTALL: + self.__child_setup_uninstall( + _pmd, _progtrack, _syncmd_tmp, **kwargs + ) + elif ( + _pkg_op == pkgdefs.PKG_OP_CHANGE_FACET + or _pkg_op == pkgdefs.PKG_OP_CHANGE_VARIANT + ): + self.__child_setup_change_varcets( + _pmd, _progtrack, _syncmd_tmp, **kwargs + ) + else: + raise RuntimeError( + "Unsupported package client op: {0}".format(_pkg_op) + ) -def get_packages(img, pd=None): - """Figure out the current (or planned) list of packages in img.""" - - ppkgs = set(img.get_catalog(img.IMG_CATALOG_INSTALLED).fmris()) - - # if there's an image plan the we need to update the installed - # packages based on that plan. - if pd is not None: - for src, dst in pd.plan_desc: - if src == dst: - continue - if src: - assert src in ppkgs - ppkgs -= set([src]) - if dst: - assert dst not in ppkgs - ppkgs |= set([dst]) - - # paranoia - return frozenset(ppkgs) + def child_op_start(self): + """Public interface to start an operation on a child image.""" -def get_inheritable_facets(img, pd=None): - """Get Facets from an image that a child should inherit. + # if we have a return value this operation is done + if self.__child_op_rvtuple is not None: + return True - We only want to sync facets which affect packages that have parent - dependencies on themselves. In practice this essentially limits us to - "facet.version-lock.*" facets.""" + self.__pkg_remote.start() - # get installed (or planned) parent packages and facets - ppkgs = get_packages(img, pd=pd) - facets = img.cfg.facets - if pd is not None and pd.new_facets is not None: - facets = pd.new_facets + def child_op_is_done(self): + """Public interface to query if an operation on a child image + is done.""" - # create a packages dictionary indexed by package stem. - ppkgs_dict = dict([ - (pfmri.pkg_name, pfmri) - for pfmri in ppkgs - ]) + # if we have a return value this operation is done + if self.__child_op_rvtuple is not None: + return True - # - # For performance reasons see if we can limit ourselves to using the - # installed catalog. If this is a non-image modifying operation then - # the installed catalog should be sufficient. If this is an image - # modifying operation that is installing new packages, then we'll need - # to use the known catalog (which should already have been initialized - # and used during the image planning operation) to lookup information - # about the packages being installed. 
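The `child_op_setup` method above selects a setup helper for each PKG_OP_* value through a chain of elif branches. The same shape can also be written as a dispatch table; a toy sketch of that alternative, where strings stand in for the pkgdefs constants and the handlers are placeholders rather than the real setup methods:

def setup_audit(pmd):
    print("audit", pmd)

def setup_sync(pmd, ignore_syncmd_nop, syncmd_tmp, **kwargs):
    print("sync", pmd, ignore_syncmd_nop, syncmd_tmp, kwargs)

DISPATCH = {
    "audit-linked": lambda pmd, nop, tmp, **kw: setup_audit(pmd, **kw),
    "sync": setup_sync,
}

def child_op_setup(pkg_op, pmd, ignore_syncmd_nop, syncmd_tmp, **kwargs):
    try:
        handler = DISPATCH[pkg_op]
    except KeyError:
        raise RuntimeError(
            "Unsupported package client op: {0}".format(pkg_op))
    handler(pmd, ignore_syncmd_nop, syncmd_tmp, **kwargs)

child_op_setup("sync", ("ppubs", "ppkgs", "pfacets"), True, False,
               noexecute=True)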
- # - cat = img.get_catalog(img.IMG_CATALOG_INSTALLED) - if not ppkgs <= frozenset(cat.fmris()): - cat = img.get_catalog(img.IMG_CATALOG_KNOWN) + # make sure there is some data from the child + return self.__pkg_remote.is_done() - # - # iterate through all installed (or planned) package incorporation - # dependency actions and find those that are affected by image facets. - # - # we don't check for package-wide facets here because they don't do - # anything. (ie, facets defined via "set" actions in a package have - # no effect on other actions within that package.) - # - faceted_deps = dict() - for pfmri in ppkgs: - for act in cat.get_entry_actions(pfmri, [cat.DEPENDENCY]): - # we're only interested in incorporate dependencies - if act.name != "depend" or \ - act.attrs["type"] != "incorporate": - continue - - # check if any image facets affect this dependency - # W0212 Access to a protected member - # pylint: disable=W0212 - matching_facets = facets._action_match(act) - # pylint: enable=W0212 - if not matching_facets: - continue - - # if all the matching facets are true we don't care - # about the match. - if set([i[1] for i in matching_facets]) == set([True]): - continue - - # save this set of facets. - faceted_deps[act] = matching_facets + def child_op_rv(self, expect_plan): + """Public interface to get the result of an operation on a + child image. - # - # For each faceted incorporation dependency, check if it affects a - # package that has parent dependencies on itself. This is really a - # best effort in that we don't follow package renames or obsoletions, - # etc. - # - # To limit the number of packages we inspect, we'll try to match the - # incorporation dependency fmri targets packages by stem to packages - # which are installed (or planned) within the parent image. This - # allows us to quickly get a fully qualified fmri and check against a - # package for which we have already downloaded a manifest. - # - # If we can't match the dependency fmri package stem against packages - # installed (or planned) in the parent image, we don't bother - # searching for allowable packages in the catalog, because even if we - # found them in the catalog and they did have a parent dependency, - # they'd all still be uninstallable in any children because there - # would be no way to satisfy the parent dependency. (as we already - # stated the package is not installed in the parent.) - # - faceted_linked_deps = dict() - for act in faceted_deps: - for fmri in act.attrlist("fmri"): - pfmri = pkg.fmri.PkgFmri(fmri) - pfmri = ppkgs_dict.get(pfmri.pkg_name, None) - if pfmri is None: - continue - - # check if this package has a dependency on itself in - # its parent image. - for act2 in cat.get_entry_actions(pfmri, - [cat.DEPENDENCY]): - if act2.name != "depend" or \ - act2.attrs["type"] != "parent": - continue - if pkg.actions.depend.DEPEND_SELF not in \ - act2.attrlist("fmri"): - continue - faceted_linked_deps[act] = faceted_deps[act] - break - del faceted_deps + 'expect_plan' boolean indicating if the child is performing a + planning operation. this is needed because if we're running + in parsable output mode then the child will emit a parsable + json version of the plan on stdout, and we'll verify it by + running it through the json parser. + """ - # - # Create a set of all facets which affect incorporation dependencies - # on synced packages. - # - # Note that we can't limit ourselves to only passing on facets that - # affect dependencies which have been disabled. 
Doing this could lead - # to incorrect results because facets allow for pattern matching. So - # for example say we had the following dependencies on synced - # packages: - # - # depend type=incorporation fmri=some_synced_pkg1 facet.123456=true - # depend type=incorporation fmri=some_synced_pkg2 facet.456789=true - # - # and the following image facets: - # - # facet.123456 = True - # facet.*456* = False - # - # if we only passed through facets which affected disabled packages - # we'd just pass through "facet.*456*", but this would result in - # disabling both dependencies above, not just the second dependency. - # - pfacets = pkg.facet.Facets() - for facets in faceted_linked_deps.values(): - for k, v in facets: - # W0212 Access to a protected member - # pylint: disable=W0212 - pfacets._set_inherited(k, v) + # The child op is now done, so we reset __child_op to make sure + # we don't accidentally reuse the LIC without properly setting + # it up again. However, we still need the op type in this + # function so we make a copy. + pkg_op = self.__child_op + self.__child_op = None + + # if we have a return value this operation is done + if self.__child_op_rvtuple is not None: + rvtuple = self.__child_op_rvtuple + self.__child_op_rvtuple = None + return (rvtuple, None, None) + + # make sure we're not going to block + assert self.__pkg_remote.is_done() + + (rv, e, stdout, stderr) = self.__pkg_remote.result() + if e is not None: + rv = pkgdefs.EXIT_OOPS + + # if we got an exception, or a return value other than OK or + # NOP, then return an exception. + if e is not None or rv not in [pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP]: + e = apx.LinkedImageException( + lin=self.child_name, + exitrv=rv, + pkg_op_failed=(pkg_op, rv, stdout + stderr, e), + ) + rvtuple = LI_RVTuple(rv, e, None) + return (rvtuple, stdout, stderr) + + # check for NOP. + if rv == pkgdefs.EXIT_NOP: + assert e is None + rvtuple = LI_RVTuple(rv, None, None) + return (rvtuple, stdout, stderr) + + if ( + global_settings.client_output_parsable_version is None + or not expect_plan + ): + rvtuple = LI_RVTuple(rv, None, None) + return (rvtuple, stdout, stderr) + + # If a plan was created and we're in parsable output mode then + # parse the plan that should have been displayed to stdout. + p_dict = None + try: + p_dict = json.loads(stdout) + except ValueError as e: + # JSON raises a subclass of ValueError when it + # can't parse a string. + + e = apx.LinkedImageException( + lin=self.child_name, + unparsable_output=(pkg_op, stdout + stderr, e), + ) + rvtuple = LI_RVTuple(rv, e, None) + return (rvtuple, stdout, stderr) + + p_dict["image-name"] = str(self.child_name) + rvtuple = LI_RVTuple(rv, None, p_dict) + return (rvtuple, stdout, stderr) + + def fileno(self): + """Return the progress pipe associated with the PkgRemote + instance that is operating on a child image.""" + return self.__pkg_remote.fileno() + + def child_init_root(self): + """Our image path is being updated, so figure out our new + child image paths. This interface only gets invoked when: + + - We're doing a packaging operation on a parent image and + we've just cloned that parent to create a new BE that we're + going to update. This clone also cloned all the children + and so now we need to update our paths to point to the newly + created children. 
+ + - We tried to update a cloned image (as described above) and + our update failed, hence we're changing paths back to the + original images that were the source of the clone.""" + + # PROP_PARENT_PATH better not be present because + # LinkedImageChild objects are only used with push child + # images. + assert PROP_PARENT_PATH not in self.__props + + # Remove any path transform and reapply. + self.__props = rm_dict_ent(self.__props, temporal_props) + self.__linked.set_path_transform( + self.__props, + self.__linked.get_path_transform(), + path=self.__props[PROP_PATH], + ) - return pfacets # --------------------------------------------------------------------------- -# Utility Functions +# Interfaces to obtain linked image metadata from an image # -def save_data(path, data, root="/", catch_exception=True): - """Save JSON encoded linked image metadata to a file.""" +def get_pubs(img): + """Return publisher information for the specified image. - def PkgEncode(obj): - """Required routine that overrides the default base - class version. This routine must serialize 'obj' when - attempting to save 'obj' json format.""" + Publisher information is returned in a sorted list of lists + of the format: + , - if isinstance(obj, (pkg.fmri.PkgFmri, - pkg.client.linkedimage.common.LinkedImageName)): - return str(obj) + Where: + is a string + is a boolean - if isinstance(obj, pkgplan.PkgPlan): - return obj.getstate() + The tuples are sorted by publisher rank. + """ - if isinstance(obj, (set, frozenset)): - return list(obj) + return [ + [str(p), p.sticky] + for p in img.get_sorted_publishers(inc_disabled=False) + if not p.nochild + ] - # make sure the directory we're about to save data into exists. - path_dir = os.path.dirname(path) - pathtmp = "{0}.{1:d}.tmp".format(path, os.getpid()) - try: - if not ar.ar_exists(root, path_dir): - ar.ar_mkdir(root, path_dir, misc.PKG_DIR_MODE, - exists_is_ok=True) # parallel zone create race - - # write the output to a temporary file - fd = ar.ar_open(root, pathtmp, os.O_WRONLY, - mode=0o644, create=True, truncate=True) - fobj = os.fdopen(fd, "w") - json.dump(data, fobj, default=PkgEncode) - fobj.close() - - # atomically create the desired file - ar.ar_rename(root, pathtmp, path) - except OSError as e: - # W0212 Access to a protected member - # pylint: disable=W0212 - if catch_exception: - raise apx._convert_error(e) - raise e +def get_packages(img, pd=None): + """Figure out the current (or planned) list of packages in img.""" -def load_data(path, missing_ok=False, root="/", decode=True, - catch_exception=False): - """Load JSON encoded linked image metadata from a file.""" + ppkgs = set(img.get_catalog(img.IMG_CATALOG_INSTALLED).fmris()) - object_hook = None - if decode: - object_hook = pkg.client.linkedimage.PkgDecoder + # if there's an image plan the we need to update the installed + # packages based on that plan. + if pd is not None: + for src, dst in pd.plan_desc: + if src == dst: + continue + if src: + assert src in ppkgs + ppkgs -= set([src]) + if dst: + assert dst not in ppkgs + ppkgs |= set([dst]) - try: - if missing_ok and not path_exists(path, root=root): - return None + # paranoia + return frozenset(ppkgs) + + +def get_inheritable_facets(img, pd=None): + """Get Facets from an image that a child should inherit. + + We only want to sync facets which affect packages that have parent + dependencies on themselves. 
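`get_packages` above folds the image plan into the installed package set: each (src, dst) pair either drops the old FMRI, adds the new one, or both. A minimal sketch of that fold, with plain strings standing in for FMRI objects and an invented plan:

def planned_packages(installed, plan_desc):
    """Apply (src, dst) plan pairs to the installed package set."""
    ppkgs = set(installed)
    for src, dst in plan_desc:
        if src == dst:
            continue            # unchanged
        if src:
            ppkgs.discard(src)  # old version leaves the image
        if dst:
            ppkgs.add(dst)      # new version (or new package) arrives
    return frozenset(ppkgs)

installed = ["pkg://omnios/web/server@1.0", "pkg://omnios/editor/vim@8.2"]
plan = [("pkg://omnios/web/server@1.0", "pkg://omnios/web/server@1.1"),
        (None, "pkg://omnios/shell/zsh@5.9")]
print(sorted(planned_packages(installed, plan)))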
In practice this essentially limits us to + "facet.version-lock.*" facets.""" + + # get installed (or planned) parent packages and facets + ppkgs = get_packages(img, pd=pd) + facets = img.cfg.facets + if pd is not None and pd.new_facets is not None: + facets = pd.new_facets + + # create a packages dictionary indexed by package stem. + ppkgs_dict = dict([(pfmri.pkg_name, pfmri) for pfmri in ppkgs]) + + # + # For performance reasons see if we can limit ourselves to using the + # installed catalog. If this is a non-image modifying operation then + # the installed catalog should be sufficient. If this is an image + # modifying operation that is installing new packages, then we'll need + # to use the known catalog (which should already have been initialized + # and used during the image planning operation) to lookup information + # about the packages being installed. + # + cat = img.get_catalog(img.IMG_CATALOG_INSTALLED) + if not ppkgs <= frozenset(cat.fmris()): + cat = img.get_catalog(img.IMG_CATALOG_KNOWN) + + # + # iterate through all installed (or planned) package incorporation + # dependency actions and find those that are affected by image facets. + # + # we don't check for package-wide facets here because they don't do + # anything. (ie, facets defined via "set" actions in a package have + # no effect on other actions within that package.) + # + faceted_deps = dict() + for pfmri in ppkgs: + for act in cat.get_entry_actions(pfmri, [cat.DEPENDENCY]): + # we're only interested in incorporate dependencies + if act.name != "depend" or act.attrs["type"] != "incorporate": + continue + + # check if any image facets affect this dependency + # W0212 Access to a protected member + # pylint: disable=W0212 + matching_facets = facets._action_match(act) + # pylint: enable=W0212 + if not matching_facets: + continue + + # if all the matching facets are true we don't care + # about the match. + if set([i[1] for i in matching_facets]) == set([True]): + continue + + # save this set of facets. + faceted_deps[act] = matching_facets + + # + # For each faceted incorporation dependency, check if it affects a + # package that has parent dependencies on itself. This is really a + # best effort in that we don't follow package renames or obsoletions, + # etc. + # + # To limit the number of packages we inspect, we'll try to match the + # incorporation dependency fmri targets packages by stem to packages + # which are installed (or planned) within the parent image. This + # allows us to quickly get a fully qualified fmri and check against a + # package for which we have already downloaded a manifest. + # + # If we can't match the dependency fmri package stem against packages + # installed (or planned) in the parent image, we don't bother + # searching for allowable packages in the catalog, because even if we + # found them in the catalog and they did have a parent dependency, + # they'd all still be uninstallable in any children because there + # would be no way to satisfy the parent dependency. (as we already + # stated the package is not installed in the parent.) + # + faceted_linked_deps = dict() + for act in faceted_deps: + for fmri in act.attrlist("fmri"): + pfmri = pkg.fmri.PkgFmri(fmri) + pfmri = ppkgs_dict.get(pfmri.pkg_name, None) + if pfmri is None: + continue + + # check if this package has a dependency on itself in + # its parent image. 
+ for act2 in cat.get_entry_actions(pfmri, [cat.DEPENDENCY]): + if act2.name != "depend" or act2.attrs["type"] != "parent": + continue + if pkg.actions.depend.DEPEND_SELF not in act2.attrlist("fmri"): + continue + faceted_linked_deps[act] = faceted_deps[act] + break + del faceted_deps + + # + # Create a set of all facets which affect incorporation dependencies + # on synced packages. + # + # Note that we can't limit ourselves to only passing on facets that + # affect dependencies which have been disabled. Doing this could lead + # to incorrect results because facets allow for pattern matching. So + # for example say we had the following dependencies on synced + # packages: + # + # depend type=incorporation fmri=some_synced_pkg1 facet.123456=true + # depend type=incorporation fmri=some_synced_pkg2 facet.456789=true + # + # and the following image facets: + # + # facet.123456 = True + # facet.*456* = False + # + # if we only passed through facets which affected disabled packages + # we'd just pass through "facet.*456*", but this would result in + # disabling both dependencies above, not just the second dependency. + # + pfacets = pkg.facet.Facets() + for facets in faceted_linked_deps.values(): + for k, v in facets: + # W0212 Access to a protected member + # pylint: disable=W0212 + pfacets._set_inherited(k, v) + + return pfacets + + +# --------------------------------------------------------------------------- +# Utility Functions +# +def save_data(path, data, root="/", catch_exception=True): + """Save JSON encoded linked image metadata to a file.""" + + def PkgEncode(obj): + """Required routine that overrides the default base + class version. This routine must serialize 'obj' when + attempting to save 'obj' json format.""" + + if isinstance( + obj, + (pkg.fmri.PkgFmri, pkg.client.linkedimage.common.LinkedImageName), + ): + return str(obj) + + if isinstance(obj, pkgplan.PkgPlan): + return obj.getstate() + + if isinstance(obj, (set, frozenset)): + return list(obj) + + # make sure the directory we're about to save data into exists. 
+ path_dir = os.path.dirname(path) + pathtmp = "{0}.{1:d}.tmp".format(path, os.getpid()) + + try: + if not ar.ar_exists(root, path_dir): + ar.ar_mkdir( + root, path_dir, misc.PKG_DIR_MODE, exists_is_ok=True + ) # parallel zone create race + + # write the output to a temporary file + fd = ar.ar_open( + root, pathtmp, os.O_WRONLY, mode=0o644, create=True, truncate=True + ) + fobj = os.fdopen(fd, "w") + json.dump(data, fobj, default=PkgEncode) + fobj.close() + + # atomically create the desired file + ar.ar_rename(root, pathtmp, path) + except OSError as e: + # W0212 Access to a protected member + # pylint: disable=W0212 + if catch_exception: + raise apx._convert_error(e) + raise e + + +def load_data( + path, missing_ok=False, root="/", decode=True, catch_exception=False +): + """Load JSON encoded linked image metadata from a file.""" + + object_hook = None + if decode: + object_hook = pkg.client.linkedimage.PkgDecoder + + try: + if missing_ok and not path_exists(path, root=root): + return None + + fd = ar.ar_open(root, path, os.O_RDONLY) + fobj = os.fdopen(fd, "r") + data = json.load(fobj, object_hook=object_hook) + fobj.close() + except OSError as e: + # W0212 Access to a protected member + # pylint: disable=W0212 + if catch_exception: + raise apx._convert_error(e) + raise apx._convert_error(e) + return data - fd = ar.ar_open(root, path, os.O_RDONLY) - fobj = os.fdopen(fd, "r") - data = json.load(fobj, object_hook=object_hook) - fobj.close() - except OSError as e: - # W0212 Access to a protected member - # pylint: disable=W0212 - if catch_exception: - raise apx._convert_error(e) - raise apx._convert_error(e) - return data def PkgDecoder(dct): - """Utility class used when json decoding linked image metadata.""" - # Replace unicode keys/values with strings - rvdct = {} - for k, v in six.iteritems(dct): + """Utility class used when json decoding linked image metadata.""" + # Replace unicode keys/values with strings + rvdct = {} + for k, v in six.iteritems(dct): + k = misc.force_str(k) + v = misc.force_str(v) - k = misc.force_str(k) - v = misc.force_str(v) + # convert boolean strings values back into booleans + if type(v) == str: + if v.lower() == "true": + v = True + elif v.lower() == "false": + v = False - # convert boolean strings values back into booleans - if type(v) == str: - if v.lower() == "true": - v = True - elif v.lower() == "false": - v = False + rvdct[k] = v + return rvdct - rvdct[k] = v - return rvdct def rm_dict_ent(d, keys): - """Remove a set of keys from a dictionary.""" - return dict([ - (k, v) - for k, v in six.iteritems(d) - if k not in keys - ]) - -def _rterr(li=None, lic=None, lin=None, path=None, err=None, + """Remove a set of keys from a dictionary.""" + return dict([(k, v) for k, v in six.iteritems(d) if k not in keys]) + + +def _rterr( + li=None, + lic=None, + lin=None, + path=None, + err=None, bad_cp=None, bad_iup=None, bad_lin_type=None, bad_prop=None, missing_props=None, multiple_transforms=None, - saved_temporal_props=None): - """Oops. We hit a runtime error. Die with a nice informative - message. 
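`save_data` and `PkgDecoder` above form the two halves of the metadata round trip: a json 'default' hook flattens objects json cannot serialize (FMRIs, linked image names, sets) and the output is written to a temporary name and renamed into place, while the decoder turns "true"/"false" strings back into booleans on load. A standalone sketch of both halves, using plain os calls instead of the ar_* wrappers and invented sample data:

import json
import os

def pkg_encode(obj):
    """json 'default' hook: flatten objects json can't serialize."""
    if isinstance(obj, (set, frozenset)):
        return list(obj)
    return str(obj)           # e.g. an FMRI or LinkedImageName

def save_data(path, data):
    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    pathtmp = "{0}.{1:d}.tmp".format(path, os.getpid())
    with open(pathtmp, "w") as f:
        json.dump(data, f, default=pkg_encode)
    os.rename(pathtmp, path)  # readers never see a half-written file

def pkg_decoder(dct):
    """json object_hook: restore booleans stored as strings."""
    out = {}
    for k, v in dct.items():
        if isinstance(v, str) and v.lower() in ("true", "false"):
            v = v.lower() == "true"
        out[k] = v
    return out

save_data("/tmp/li-demo.json",
          {"attached": "false", "ppkgs": frozenset(["a", "b"])})
with open("/tmp/li-demo.json") as f:
    print(json.load(f, object_hook=pkg_decoder))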
Note that runtime errors should never happen and usually - indicate bugs (or possibly corrupted linked image metadata), so they - are not localized (just like asserts are not localized).""" - - assert not (li and lic) - assert not ((lin or path) and li) - assert not ((lin or path) and lic) - assert path is None or type(path) == str - - if bad_cp: - assert err is None - err = "Invalid linked content policy: {0}".format(bad_cp) - elif bad_iup: - assert err is None - err = "Invalid linked image update policy: {0}".format(bad_iup) - elif bad_lin_type: - assert err is None - err = "Invalid linked image type: {0}".format(bad_lin_type) - elif bad_prop: - assert err is None - err = "Invalid linked property value: {0}={1}".format(*bad_prop) - elif missing_props: - assert err is None - err = "Missing required linked properties: {0}".format( - ", ".join(missing_props)) - elif multiple_transforms: - assert err is None - err = "Multiple plugins reported different path transforms:" - for plugin, transform in multiple_transforms: - err += "\n\t{0} = {1} -> {2}".format(plugin, - transform[0], transform[1]) - elif saved_temporal_props: - assert err is None - err = "Found saved temporal linked properties: {0}".format( - ", ".join(saved_temporal_props)) - else: - assert err != None + saved_temporal_props=None, +): + """Oops. We hit a runtime error. Die with a nice informative + message. Note that runtime errors should never happen and usually + indicate bugs (or possibly corrupted linked image metadata), so they + are not localized (just like asserts are not localized).""" + + assert not (li and lic) + assert not ((lin or path) and li) + assert not ((lin or path) and lic) + assert path is None or type(path) == str + + if bad_cp: + assert err is None + err = "Invalid linked content policy: {0}".format(bad_cp) + elif bad_iup: + assert err is None + err = "Invalid linked image update policy: {0}".format(bad_iup) + elif bad_lin_type: + assert err is None + err = "Invalid linked image type: {0}".format(bad_lin_type) + elif bad_prop: + assert err is None + err = "Invalid linked property value: {0}={1}".format(*bad_prop) + elif missing_props: + assert err is None + err = "Missing required linked properties: {0}".format( + ", ".join(missing_props) + ) + elif multiple_transforms: + assert err is None + err = "Multiple plugins reported different path transforms:" + for plugin, transform in multiple_transforms: + err += "\n\t{0} = {1} -> {2}".format( + plugin, transform[0], transform[1] + ) + elif saved_temporal_props: + assert err is None + err = "Found saved temporal linked properties: {0}".format( + ", ".join(saved_temporal_props) + ) + else: + assert err != None + + if li: + if li.ischild(): + lin = li.child_name + path = li.image.root + + if lic: + lin = lic.child_name + path = lic.child_path + + err_prefix = "Linked image error: " + if lin: + err_prefix = "Linked image ({0}) error: ".format(str(lin)) + + err_suffix = "" + if path and lin: + err_suffix = "\nLinked image ({0}) path: {1}".format(str(lin), path) + elif path: + err_suffix = "\nLinked image path: {0}".format(path) + + raise RuntimeError("{0}: {1}{2}".format(err_prefix, err, err_suffix)) - if li: - if li.ischild(): - lin = li.child_name - path = li.image.root - - if lic: - lin = lic.child_name - path = lic.child_path - - err_prefix = "Linked image error: " - if lin: - err_prefix = "Linked image ({0}) error: ".format(str(lin)) - - err_suffix = "" - if path and lin: - err_suffix = "\nLinked image ({0}) path: {1}".format(str(lin), - path) - elif path: - 
err_suffix = "\nLinked image path: {0}".format(path) - - raise RuntimeError( - "{0}: {1}{2}".format(err_prefix, err, err_suffix)) # --------------------------------------------------------------------------- # Functions for accessing files in the current root # def path_exists(path, root="/"): - """Simple wrapper for accessing files in the current root.""" + """Simple wrapper for accessing files in the current root.""" + + try: + return ar.ar_exists(root, path) + except OSError as e: + # W0212 Access to a protected member + # pylint: disable=W0212 + raise apx._convert_error(e) - try: - return ar.ar_exists(root, path) - except OSError as e: - # W0212 Access to a protected member - # pylint: disable=W0212 - raise apx._convert_error(e) def path_isdir(path): - """Simple wrapper for accessing files in the current root.""" + """Simple wrapper for accessing files in the current root.""" + + try: + return ar.ar_isdir("/", path) + except OSError as e: + # W0212 Access to a protected member + # pylint: disable=W0212 + raise apx._convert_error(e) - try: - return ar.ar_isdir("/", path) - except OSError as e: - # W0212 Access to a protected member - # pylint: disable=W0212 - raise apx._convert_error(e) def path_mkdir(path, mode): - """Simple wrapper for accessing files in the current root.""" + """Simple wrapper for accessing files in the current root.""" + + try: + return ar.ar_mkdir("/", path, mode) + except OSError as e: + # W0212 Access to a protected member + # pylint: disable=W0212 + raise apx._convert_error(e) - try: - return ar.ar_mkdir("/", path, mode) - except OSError as e: - # W0212 Access to a protected member - # pylint: disable=W0212 - raise apx._convert_error(e) def path_unlink(path, noent_ok=False): - """Simple wrapper for accessing files in the current root.""" + """Simple wrapper for accessing files in the current root.""" + + try: + return ar.ar_unlink("/", path, noent_ok=noent_ok) + except OSError as e: + # W0212 Access to a protected member + # pylint: disable=W0212 + raise apx._convert_error(e) - try: - return ar.ar_unlink("/", path, noent_ok=noent_ok) - except OSError as e: - # W0212 Access to a protected member - # pylint: disable=W0212 - raise apx._convert_error(e) # --------------------------------------------------------------------------- # Functions for managing images which may be in alternate roots # + def path_transform_applicable(path, path_transform): - """Check if 'path_transform' can be applied to 'path'.""" + """Check if 'path_transform' can be applied to 'path'.""" - # Make sure path has a leading and trailing os.sep. - assert os.path.isabs(path), "path is not absolute: {0}".format(path) - path = path.rstrip(os.sep) + os.sep + # Make sure path has a leading and trailing os.sep. + assert os.path.isabs(path), "path is not absolute: {0}".format(path) + path = path.rstrip(os.sep) + os.sep - # If there is no transform, then any any translation is valid. - if path_transform == PATH_TRANSFORM_NONE: - return True + # If there is no transform, then any any translation is valid. + if path_transform == PATH_TRANSFORM_NONE: + return True + + # check for nested or equal paths + if path.startswith(path_transform[0]): + return True + return False - # check for nested or equal paths - if path.startswith(path_transform[0]): - return True - return False def path_transform_applied(path, path_transform): - """Check if 'path_transform' has been applied to 'path'.""" + """Check if 'path_transform' has been applied to 'path'.""" - # Make sure path has a leading and trailing os.sep. 
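# path_transform_applicable()/path_transform_apply() above (and
# compute_path_transform() below) all normalize paths to end in os.sep before
# comparing prefixes; without that, "/rpool2/zones" would wrongly match a
# transform rooted at "/rpool".  A stand-alone sketch of the pair, with
# made-up paths and None standing in for PATH_TRANSFORM_NONE.
import os

def applicable(path, transform):
    path = path.rstrip(os.sep) + os.sep
    return transform is None or path.startswith(transform[0])

def apply_transform(path, transform):
    path = path.rstrip(os.sep) + os.sep
    if transform is None:
        return path
    oroot, nroot = transform
    assert applicable(path, transform)
    return os.path.join(nroot, path[len(oroot):])

t = ("/rpool/", "/mnt/rpool/")
print(applicable("/rpool2/zones/z1", t))       # False: no false prefix match
print(apply_transform("/rpool/zones/z1", t))   # /mnt/rpool/zones/z1/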
- assert os.path.isabs(path), "path is not absolute: {0}".format(path) - path = path.rstrip(os.sep) + os.sep + # Make sure path has a leading and trailing os.sep. + assert os.path.isabs(path), "path is not absolute: {0}".format(path) + path = path.rstrip(os.sep) + os.sep + + # Reverse the transform. + path_transform = (path_transform[1], path_transform[0]) + return path_transform_applicable(path, path_transform) - # Reverse the transform. - path_transform = (path_transform[1], path_transform[0]) - return path_transform_applicable(path, path_transform) def path_transform_apply(path, path_transform): - """Apply the 'path_transform' to 'path'.""" + """Apply the 'path_transform' to 'path'.""" - # Make sure path has a leading and trailing os.sep. - assert os.path.isabs(path), "path is not absolute: {0}".format(path) - path = path.rstrip(os.sep) + os.sep + # Make sure path has a leading and trailing os.sep. + assert os.path.isabs(path), "path is not absolute: {0}".format(path) + path = path.rstrip(os.sep) + os.sep + + if path_transform == PATH_TRANSFORM_NONE: + return path - if path_transform == PATH_TRANSFORM_NONE: - return path + oroot, nroot = path_transform + assert path_transform_applicable(path, path_transform) + return os.path.join(nroot, path[len(oroot) :]) - oroot, nroot = path_transform - assert path_transform_applicable(path, path_transform) - return os.path.join(nroot, path[len(oroot):]) def path_transform_revert(path, path_transform): - """Unapply the 'path_transform' from 'path'.""" + """Unapply the 'path_transform' from 'path'.""" + + # Reverse the transform. + path_transform = (path_transform[1], path_transform[0]) + return path_transform_apply(path, path_transform) - # Reverse the transform. - path_transform = (path_transform[1], path_transform[0]) - return path_transform_apply(path, path_transform) def compute_path_transform(opath, npath): - """Given an two paths create a transform that can be used to translate - between them.""" - - # Make sure all paths have a leading and trailing os.sep. - assert os.path.isabs(opath), "opath is not absolute: {0}".format(opath) - assert os.path.isabs(npath), "npath is not absolute: {0}".format(npath) - opath = opath.rstrip(os.sep) + os.sep - npath = npath.rstrip(os.sep) + os.sep - - # Remove the longest common path suffix. Do this by reversing the - # path strings, finding the longest common prefix, removing the common - # prefix, and reversing the paths strings again. Make sure there is a - # trailing os.sep. - i = 0 - opath_rev = opath[::-1] - npath_rev = npath[::-1] - for i in range(min(len(opath_rev), len(npath_rev))): - if opath_rev[i] != npath_rev[i]: - break - oroot = opath_rev[i:][::-1].rstrip(os.sep) + os.sep - nroot = npath_rev[i:][::-1].rstrip(os.sep) + os.sep - - # Old root and new root should start and end with a '/'. - assert oroot[0] == nroot[0] == '/' - assert oroot[-1] == nroot[-1] == '/' - - # Return the altroot transform tuple. - if oroot == nroot: - return PATH_TRANSFORM_NONE - return (oroot, nroot) + """Given an two paths create a transform that can be used to translate + between them.""" + + # Make sure all paths have a leading and trailing os.sep. + assert os.path.isabs(opath), "opath is not absolute: {0}".format(opath) + assert os.path.isabs(npath), "npath is not absolute: {0}".format(npath) + opath = opath.rstrip(os.sep) + os.sep + npath = npath.rstrip(os.sep) + os.sep + + # Remove the longest common path suffix. 
Do this by reversing the + # path strings, finding the longest common prefix, removing the common + # prefix, and reversing the paths strings again. Make sure there is a + # trailing os.sep. + i = 0 + opath_rev = opath[::-1] + npath_rev = npath[::-1] + for i in range(min(len(opath_rev), len(npath_rev))): + if opath_rev[i] != npath_rev[i]: + break + oroot = opath_rev[i:][::-1].rstrip(os.sep) + os.sep + nroot = npath_rev[i:][::-1].rstrip(os.sep) + os.sep + + # Old root and new root should start and end with a '/'. + assert oroot[0] == nroot[0] == "/" + assert oroot[-1] == nroot[-1] == "/" + + # Return the altroot transform tuple. + if oroot == nroot: + return PATH_TRANSFORM_NONE + return (oroot, nroot) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/linkedimage/system.py b/src/modules/client/linkedimage/system.py index 86209532b..2cc44ae62 100644 --- a/src/modules/client/linkedimage/system.py +++ b/src/modules/client/linkedimage/system.py @@ -34,110 +34,109 @@ import pkg.client.pkgdefs as pkgdefs # import linked image common code -from . import common as li # Relative import; pylint: disable=W0403 +from . import common as li # Relative import; pylint: disable=W0403 class LinkedImageSystemPlugin(li.LinkedImagePlugin): - """See parent class for docstring.""" + """See parent class for docstring.""" - # specify what functionality we support - support_attach = True - support_detach = True + # specify what functionality we support + support_attach = True + support_detach = True - # default attach property values - attach_props_def = { - li.PROP_RECURSE: True - } + # default attach property values + attach_props_def = {li.PROP_RECURSE: True} - def __init__(self, pname, linked): - """See parent class for docstring.""" - li.LinkedImagePlugin.__init__(self, pname, linked) + def __init__(self, pname, linked): + """See parent class for docstring.""" + li.LinkedImagePlugin.__init__(self, pname, linked) - # globals - self.__img = linked.image - self.__pname = pname - self.__linked = linked + # globals + self.__img = linked.image + self.__pname = pname + self.__linked = linked - def init_root(self, root): - """See parent class for docstring.""" - # nothing to do - return + def init_root(self, root): + """See parent class for docstring.""" + # nothing to do + return - def guess_path_transform(self, ignore_errors=False): - """See parent class for docstring.""" - # nothing to do - return li.PATH_TRANSFORM_NONE + def guess_path_transform(self, ignore_errors=False): + """See parent class for docstring.""" + # nothing to do + return li.PATH_TRANSFORM_NONE - def get_child_list(self, nocache=False, ignore_errors=False): - """See parent class for docstring.""" + def get_child_list(self, nocache=False, ignore_errors=False): + """See parent class for docstring.""" - if not self.__img.cfg: - # this may be a new image that hasn't actually been - # created yet - return [] + if not self.__img.cfg: + # this may be a new image that hasn't actually been + # created yet + return [] - rv = [] - for lin in self.__img.cfg.linked_children: - path = self.get_child_props(lin)[li.PROP_PATH] - rv.append([lin, path]) + rv = [] + for lin in self.__img.cfg.linked_children: + path = self.get_child_props(lin)[li.PROP_PATH] + rv.append([lin, path]) - for lin, path in rv: - assert lin.lin_type == self.__pname + for lin, path in rv: + assert lin.lin_type == self.__pname - return rv + return rv - def get_child_props(self, lin): - """See parent class for docstring.""" + def 
get_child_props(self, lin): + """See parent class for docstring.""" - # return a copy of the properties - return self.__img.cfg.linked_children[lin].copy() + # return a copy of the properties + return self.__img.cfg.linked_children[lin].copy() - def attach_child_inmemory(self, props, allow_relink): - """See parent class for docstring.""" + def attach_child_inmemory(self, props, allow_relink): + """See parent class for docstring.""" - # make sure this child doesn't already exist - lin_list = [i[0] for i in self.get_child_list()] - lin = props[li.PROP_NAME] - assert lin not in lin_list or allow_relink + # make sure this child doesn't already exist + lin_list = [i[0] for i in self.get_child_list()] + lin = props[li.PROP_NAME] + assert lin not in lin_list or allow_relink - # make a copy of the properties - props = props.copy() + # make a copy of the properties + props = props.copy() - # delete temporal properties - props = li.rm_dict_ent(props, li.temporal_props) + # delete temporal properties + props = li.rm_dict_ent(props, li.temporal_props) - self.__img.cfg.linked_children[lin] = props + self.__img.cfg.linked_children[lin] = props - def detach_child_inmemory(self, lin): - """See parent class for docstring.""" + def detach_child_inmemory(self, lin): + """See parent class for docstring.""" - # make sure this child exists - assert lin in [i[0] for i in self.get_child_list()] + # make sure this child exists + assert lin in [i[0] for i in self.get_child_list()] - # Delete this linked image - del self.__img.cfg.linked_children[lin] + # Delete this linked image + del self.__img.cfg.linked_children[lin] - def sync_children_todisk(self): - """See parent class for docstring.""" + def sync_children_todisk(self): + """See parent class for docstring.""" - self.__img.cfg.write() + self.__img.cfg.write() - return li.LI_RVTuple(pkgdefs.EXIT_OK, None, None) + return li.LI_RVTuple(pkgdefs.EXIT_OK, None, None) class LinkedImageSystemChildPlugin(li.LinkedImageChildPlugin): + """See parent class for docstring.""" + + def __init__(self, lic): """See parent class for docstring.""" + li.LinkedImageChildPlugin.__init__(self, lic) - def __init__(self, lic): - """See parent class for docstring.""" - li.LinkedImageChildPlugin.__init__(self, lic) + # globals + self.__linked = lic.child_pimage.linked - # globals - self.__linked = lic.child_pimage.linked + def munge_props(self, props): + """See parent class for docstring.""" + pass - def munge_props(self, props): - """See parent class for docstring.""" - pass # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/linkedimage/zone.py b/src/modules/client/linkedimage/zone.py index dea77aede..b73a7cddd 100644 --- a/src/modules/client/linkedimage/zone.py +++ b/src/modules/client/linkedimage/zone.py @@ -46,23 +46,23 @@ from pkg.client.debugvalues import DebugValues # import linked image common code -from . import common as li # Relative import; pylint: disable=W0403 +from . 
import common as li # Relative import; pylint: disable=W0403 # W0511 XXX / FIXME Comments; pylint: disable=W0511 # XXX: should be defined by libzonecfg python wrapper # pylint: enable=W0511 -ZONE_GLOBAL = "global" +ZONE_GLOBAL = "global" -ZONE_STATE_STR_CONFIGURED = "configured" -ZONE_STATE_STR_INCOMPLETE = "incomplete" -ZONE_STATE_STR_UNAVAILABLE = "unavailable" -ZONE_STATE_STR_INSTALLED = "installed" -ZONE_STATE_STR_READY = "ready" -ZONE_STATE_STR_MOUNTED = "mounted" -ZONE_STATE_STR_RUNNING = "running" +ZONE_STATE_STR_CONFIGURED = "configured" +ZONE_STATE_STR_INCOMPLETE = "incomplete" +ZONE_STATE_STR_UNAVAILABLE = "unavailable" +ZONE_STATE_STR_INSTALLED = "installed" +ZONE_STATE_STR_READY = "ready" +ZONE_STATE_STR_MOUNTED = "mounted" +ZONE_STATE_STR_RUNNING = "running" ZONE_STATE_STR_SHUTTING_DOWN = "shutting_down" -ZONE_STATE_STR_DOWN = "down" +ZONE_STATE_STR_DOWN = "down" zone_installed_states = [ ZONE_STATE_STR_INSTALLED, @@ -70,7 +70,7 @@ ZONE_STATE_STR_MOUNTED, ZONE_STATE_STR_RUNNING, ZONE_STATE_STR_SHUTTING_DOWN, - ZONE_STATE_STR_DOWN + ZONE_STATE_STR_DOWN, ] @@ -118,433 +118,436 @@ class LinkedImageZonePlugin(li.LinkedImagePlugin): + """See parent class for docstring.""" + + # default attach property values + attach_props_def = {li.PROP_RECURSE: False} + + __zone_pkgs = frozenset( + [frozenset(["system/zones"]), frozenset(["SUNWzoner", "SUNWzoneu"])] + ) + + def __init__(self, pname, linked): """See parent class for docstring.""" + li.LinkedImagePlugin.__init__(self, pname, linked) - # default attach property values - attach_props_def = { - li.PROP_RECURSE: False - } - - __zone_pkgs = frozenset([ - frozenset(["system/zones"]), - frozenset(["SUNWzoner", "SUNWzoneu"]) - ]) - - def __init__(self, pname, linked): - """See parent class for docstring.""" - li.LinkedImagePlugin.__init__(self, pname, linked) - - # globals - self.__pname = pname - self.__linked = linked - self.__img = linked.image - self.__in_gz_cached = None - - # keep track of our freshly attach children - self.__children = dict() - - # cache zoneadm output - self.__zoneadm_list_cache = None - - def __in_gz(self, ignore_errors=False): - """Check if we're executing in the global zone. Note that - this doesn't tell us anything about the image we're - manipulating, just the environment that we're running in.""" - - if self.__in_gz_cached != None: - return self.__in_gz_cached - - # check if we're running in the gz - try: - self.__in_gz_cached = (_zonename() == ZONE_GLOBAL) - except OSError as e: - # W0212 Access to a protected member - # pylint: disable=W0212 - if ignore_errors: - # default to being in the global zone - return True - raise apx._convert_error(e) - except apx.LinkedImageException as e: - if ignore_errors: - # default to being in the global zone - return True - raise e - - return self.__in_gz_cached - - def __zones_supported(self): - """Check to see if zones are supported in the current image. - i.e. can the current image have zone children.""" - - # pylint: disable=E1120 - if DebugValues.get_value("zones_supported"): - return True - # pylint: enable=E1120 - - # first check if the image variant is global - variant = "variant.opensolaris.zone" - value = self.__img.cfg.variants[variant] - if value != "global": - return False - - # - # sanity check the path to to /etc/zones. 
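# __in_gz() above computes its answer once, caches it in an instance
# attribute with None as the "not yet known" sentinel, and falls back to a
# safe default (without caching it) when ignore_errors is set.  The pattern
# in isolation; the _probe() helper is a placeholder for _zonename().
class GZChecker:
    def __init__(self):
        self._in_gz = None              # None means "not determined yet"

    def in_gz(self, ignore_errors=False):
        if self._in_gz is not None:
            return self._in_gz
        try:
            self._in_gz = self._probe() == "global"
        except OSError:
            if ignore_errors:
                return True             # assume global zone, don't cache
            raise
        return self._in_gz

    def _probe(self):
        return "global"                 # stand-in for _zonename()

print(GZChecker().in_gz())              # True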
below we check for - # the zones packages, and any image that has the zones - # packages installed should have a /etc/zones file (since - # those packages deliver this file) but it's possible that the - # image was corrupted and the user now wants to be able to run - # pkg commands to fix it. if the path doesn't exist then we - # don't have any zones so just report that zones are - # unsupported (since zoneadm may fail to run anyway). - # - path = self.__img.root - if not os.path.isdir(os.path.join(path, "etc")): - return False - if not os.path.isdir(os.path.join(path, "etc/zones")): - return False - - # get a set of installed packages - cati = self.__img.get_catalog(self.__img.IMG_CATALOG_INSTALLED) - pkgs_inst = frozenset([ - stem - # Unused variable 'pub'; pylint: disable=W0612 - for pub, stem in cati.pkg_names() - # pylint: enable=W0612 - ]) - - # check if the zones packages are installed - for pkgs in self.__zone_pkgs: - if (pkgs & pkgs_inst) == pkgs: - return True - - return False - - def __list_zones_cached(self, nocache=False, ignore_errors=False): - """List the zones associated with the current image. Since - this involves forking and running zone commands, cache the - results.""" - - # if nocache is set then delete any cached children - if nocache: - self.__zoneadm_list_cache = None - - # try to return the cached children - if self.__zoneadm_list_cache != None: - assert type(self.__zoneadm_list_cache) == list - return self.__zoneadm_list_cache - - # see if the target image supports zones - if not self.__zones_supported(): - self.__zoneadm_list_cache = [] - return self.__list_zones_cached() - - # zones are only visible when running in the global zone - if not self.__in_gz(ignore_errors=ignore_errors): - self.__zoneadm_list_cache = [] - return self.__list_zones_cached() - - # find zones - try: - zdict = _list_zones(self.__img.root, - self.__linked.get_path_transform()) - except OSError as e: - # W0212 Access to a protected member - # pylint: disable=W0212 - if ignore_errors: - # don't cache the result - return [] - raise apx._convert_error(e) - except apx.LinkedImageException as e: - if ignore_errors: - # don't cache the result - return [] - raise e - - # convert zone names into into LinkedImageName objects - zlist = [] - # state is unused - # pylint: disable=W0612 - for zone, (path, state) in six.iteritems(zdict): - lin = li.LinkedImageName("{0}:{1}".format(self.__pname, - zone)) - zlist.append([lin, path]) - - self.__zoneadm_list_cache = zlist - return self.__list_zones_cached() - - def init_root(self, root): - """See parent class for docstring.""" - # nuke any cached children - self.__zoneadm_list_cache = None - - def guess_path_transform(self, ignore_errors=False): - """See parent class for docstring.""" - - zlist = self.__list_zones_cached(nocache=True, - ignore_errors=ignore_errors) - if not zlist: - return li.PATH_TRANSFORM_NONE - - # only global zones can have zone children, and global zones - # always execute with "/" as their root. so if the current - # image path is not "/", then assume we're in an alternate - # root. 
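# __list_zones_cached() above memoizes the expensive zoneadm listing in an
# instance attribute and lets callers force a refresh with nocache=True.
# The caching skeleton on its own; _scan() is a placeholder for forking
# zoneadm and parsing its output.
class ZoneLister:
    def __init__(self):
        self._cache = None

    def list(self, nocache=False):
        if nocache:
            self._cache = None          # drop any stale result
        if self._cache is not None:
            return self._cache
        self._cache = self._scan()
        return self._cache

    def _scan(self):
        return [("zone:z1", "/zones/z1/root")]   # invented sample data

lister = ZoneLister()
print(lister.list())                    # scans once
print(lister.list())                    # served from the cache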
- root = self.__img.root.rstrip(os.sep) + os.sep - return (os.sep, root) - - def get_child_list(self, nocache=False, ignore_errors=False): - """See parent class for docstring.""" - - inmemory = [] - # find any newly attached zone images - for lin in self.__children: - path = self.__children[lin][li.PROP_PATH] - inmemory.append([lin, path]) - - ondisk = [] - for (lin, path) in self.__list_zones_cached(nocache, - ignore_errors=ignore_errors): - if lin in [i[0] for i in inmemory]: - # we re-attached a zone in memory. - continue - ondisk.append([lin, path]) - - rv = [] - rv.extend(ondisk) - rv.extend(inmemory) - - for lin, path in rv: - assert lin.lin_type == self.__pname - - return rv - - def get_child_props(self, lin): - """See parent class for docstring.""" - - if lin in self.__children: - return self.__children[lin] - - props = dict() - props[li.PROP_NAME] = lin - for i_lin, i_path in self.get_child_list(): - if lin == i_lin: - props[li.PROP_PATH] = i_path - break - assert li.PROP_PATH in props - - props[li.PROP_MODEL] = li.PV_MODEL_PUSH - for k, v in six.iteritems(self.attach_props_def): - if k not in props: - props[k] = v - - return props - - def attach_child_inmemory(self, props, allow_relink): - """See parent class for docstring.""" - - # make sure this child doesn't already exist - lin = props[li.PROP_NAME] - lin_list = [i[0] for i in self.get_child_list()] - assert lin not in lin_list or allow_relink - - # cache properties (sans any temporarl ones) - self.__children[lin] = li.rm_dict_ent(props, li.temporal_props) - - def detach_child_inmemory(self, lin): - """See parent class for docstring.""" - - # make sure this child exists - assert lin in [i[0] for i in self.get_child_list()] + # globals + self.__pname = pname + self.__linked = linked + self.__img = linked.image + self.__in_gz_cached = None - # Delete this linked image - del self.__children[lin] + # keep track of our freshly attach children + self.__children = dict() - def sync_children_todisk(self): - """See parent class for docstring.""" + # cache zoneadm output + self.__zoneadm_list_cache = None - # nothing to do - return li.LI_RVTuple(pkgdefs.EXIT_OK, None, None) + def __in_gz(self, ignore_errors=False): + """Check if we're executing in the global zone. Note that + this doesn't tell us anything about the image we're + manipulating, just the environment that we're running in.""" + if self.__in_gz_cached != None: + return self.__in_gz_cached -class LinkedImageZoneChildPlugin(li.LinkedImageChildPlugin): + # check if we're running in the gz + try: + self.__in_gz_cached = _zonename() == ZONE_GLOBAL + except OSError as e: + # W0212 Access to a protected member + # pylint: disable=W0212 + if ignore_errors: + # default to being in the global zone + return True + raise apx._convert_error(e) + except apx.LinkedImageException as e: + if ignore_errors: + # default to being in the global zone + return True + raise e + + return self.__in_gz_cached + + def __zones_supported(self): + """Check to see if zones are supported in the current image. + i.e. can the current image have zone children.""" + + # pylint: disable=E1120 + if DebugValues.get_value("zones_supported"): + return True + # pylint: enable=E1120 + + # first check if the image variant is global + variant = "variant.opensolaris.zone" + value = self.__img.cfg.variants[variant] + if value != "global": + return False + + # + # sanity check the path to to /etc/zones. 
below we check for + # the zones packages, and any image that has the zones + # packages installed should have a /etc/zones file (since + # those packages deliver this file) but it's possible that the + # image was corrupted and the user now wants to be able to run + # pkg commands to fix it. if the path doesn't exist then we + # don't have any zones so just report that zones are + # unsupported (since zoneadm may fail to run anyway). + # + path = self.__img.root + if not os.path.isdir(os.path.join(path, "etc")): + return False + if not os.path.isdir(os.path.join(path, "etc/zones")): + return False + + # get a set of installed packages + cati = self.__img.get_catalog(self.__img.IMG_CATALOG_INSTALLED) + pkgs_inst = frozenset( + [ + stem + # Unused variable 'pub'; pylint: disable=W0612 + for pub, stem in cati.pkg_names() + # pylint: enable=W0612 + ] + ) + + # check if the zones packages are installed + for pkgs in self.__zone_pkgs: + if (pkgs & pkgs_inst) == pkgs: + return True + + return False + + def __list_zones_cached(self, nocache=False, ignore_errors=False): + """List the zones associated with the current image. Since + this involves forking and running zone commands, cache the + results.""" + + # if nocache is set then delete any cached children + if nocache: + self.__zoneadm_list_cache = None + + # try to return the cached children + if self.__zoneadm_list_cache != None: + assert type(self.__zoneadm_list_cache) == list + return self.__zoneadm_list_cache + + # see if the target image supports zones + if not self.__zones_supported(): + self.__zoneadm_list_cache = [] + return self.__list_zones_cached() + + # zones are only visible when running in the global zone + if not self.__in_gz(ignore_errors=ignore_errors): + self.__zoneadm_list_cache = [] + return self.__list_zones_cached() + + # find zones + try: + zdict = _list_zones( + self.__img.root, self.__linked.get_path_transform() + ) + except OSError as e: + # W0212 Access to a protected member + # pylint: disable=W0212 + if ignore_errors: + # don't cache the result + return [] + raise apx._convert_error(e) + except apx.LinkedImageException as e: + if ignore_errors: + # don't cache the result + return [] + raise e + + # convert zone names into into LinkedImageName objects + zlist = [] + # state is unused + # pylint: disable=W0612 + for zone, (path, state) in six.iteritems(zdict): + lin = li.LinkedImageName("{0}:{1}".format(self.__pname, zone)) + zlist.append([lin, path]) + + self.__zoneadm_list_cache = zlist + return self.__list_zones_cached() + + def init_root(self, root): """See parent class for docstring.""" + # nuke any cached children + self.__zoneadm_list_cache = None - def __init__(self, lic): - """See parent class for docstring.""" - li.LinkedImageChildPlugin.__init__(self, lic) + def guess_path_transform(self, ignore_errors=False): + """See parent class for docstring.""" + + zlist = self.__list_zones_cached( + nocache=True, ignore_errors=ignore_errors + ) + if not zlist: + return li.PATH_TRANSFORM_NONE + + # only global zones can have zone children, and global zones + # always execute with "/" as their root. so if the current + # image path is not "/", then assume we're in an alternate + # root. 
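# __zones_supported() above treats zones as available if *either* package set
# in __zone_pkgs is fully installed; "(pkgs & pkgs_inst) == pkgs" is simply a
# subset test.  A short illustration with made-up package stems;
# pkgs.issubset(installed) would express the same check more directly.
zone_pkgs = frozenset([frozenset(["system/zones"]),
                       frozenset(["SUNWzoner", "SUNWzoneu"])])
installed = frozenset(["system/zones", "system/kernel", "web/browser"])

supported = any((pkgs & installed) == pkgs for pkgs in zone_pkgs)
print(supported)        # True: the "system/zones" alternative is satisfied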
+ root = self.__img.root.rstrip(os.sep) + os.sep + return (os.sep, root) + + def get_child_list(self, nocache=False, ignore_errors=False): + """See parent class for docstring.""" + + inmemory = [] + # find any newly attached zone images + for lin in self.__children: + path = self.__children[lin][li.PROP_PATH] + inmemory.append([lin, path]) + + ondisk = [] + for lin, path in self.__list_zones_cached( + nocache, ignore_errors=ignore_errors + ): + if lin in [i[0] for i in inmemory]: + # we re-attached a zone in memory. + continue + ondisk.append([lin, path]) + + rv = [] + rv.extend(ondisk) + rv.extend(inmemory) - def munge_props(self, props): - """See parent class for docstring.""" + for lin, path in rv: + assert lin.lin_type == self.__pname - # - # For zones we always update the pushed child image path to - # be '/' (Since any linked children of the zone will be - # relative to that zone's root). - # - props[li.PROP_PATH] = "/" + return rv + + def get_child_props(self, lin): + """See parent class for docstring.""" + + if lin in self.__children: + return self.__children[lin] + + props = dict() + props[li.PROP_NAME] = lin + for i_lin, i_path in self.get_child_list(): + if lin == i_lin: + props[li.PROP_PATH] = i_path + break + assert li.PROP_PATH in props + + props[li.PROP_MODEL] = li.PV_MODEL_PUSH + for k, v in six.iteritems(self.attach_props_def): + if k not in props: + props[k] = v + + return props + + def attach_child_inmemory(self, props, allow_relink): + """See parent class for docstring.""" + + # make sure this child doesn't already exist + lin = props[li.PROP_NAME] + lin_list = [i[0] for i in self.get_child_list()] + assert lin not in lin_list or allow_relink + + # cache properties (sans any temporarl ones) + self.__children[lin] = li.rm_dict_ent(props, li.temporal_props) + + def detach_child_inmemory(self, lin): + """See parent class for docstring.""" + + # make sure this child exists + assert lin in [i[0] for i in self.get_child_list()] + + # Delete this linked image + del self.__children[lin] + + def sync_children_todisk(self): + """See parent class for docstring.""" + + # nothing to do + return li.LI_RVTuple(pkgdefs.EXIT_OK, None, None) + + +class LinkedImageZoneChildPlugin(li.LinkedImageChildPlugin): + """See parent class for docstring.""" + + def __init__(self, lic): + """See parent class for docstring.""" + li.LinkedImageChildPlugin.__init__(self, lic) + + def munge_props(self, props): + """See parent class for docstring.""" + + # + # For zones we always update the pushed child image path to + # be '/' (Since any linked children of the zone will be + # relative to that zone's root). + # + props[li.PROP_PATH] = "/" def _zonename(): - """Get the zonname of the current system.""" - - cmd = DebugValues.get_value("bin_zonename") # pylint: disable=E1120 - if cmd is not None: - cmd = [cmd] - else: - cmd = ["/bin/zonename"] - - # if the command doesn't exist then bail. 
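# get_child_list() above merges freshly attached (in-memory) children with
# what zoneadm reports on disk, letting the in-memory entry win when a zone
# was re-attached.  The merge step in isolation, with invented (name, path)
# pairs instead of LinkedImageName objects.
inmemory = [("zone:z1", "/zones/z1/root")]
ondisk = [("zone:z1", "/zones/z1/root"),   # re-attached: skip the stale copy
          ("zone:z2", "/zones/z2/root")]

seen = {name for name, _ in inmemory}
merged = [(n, p) for n, p in ondisk if n not in seen] + inmemory
print(merged)
# -> [('zone:z2', '/zones/z2/root'), ('zone:z1', '/zones/z1/root')]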
- if not li.path_exists(cmd[0]): - return - - # open a temporary file in text mode for compatible string handling - fout = tempfile.TemporaryFile(mode="w+") - ferrout = tempfile.TemporaryFile(mode="w+") - p = pkg.pkgsubprocess.Popen(cmd, stdout=fout, stderr=ferrout) - p.wait() - if p.returncode != 0: - cmd = " ".join(cmd) - ferrout.seek(0) - errout = "".join(ferrout.readlines()) - ferrout.close() - raise apx.LinkedImageException( - cmd_failed=(p.returncode, cmd, errout)) - - # parse the command output - fout.seek(0) - lines = fout.readlines() - if lines: - zonename = lines[0].rstrip() - fout.close() - return zonename - - # If /bin/zonename does not return the expected output, - # we raise an exception of LinkedImageException, which - # is handled by _in_gz(). + """Get the zonname of the current system.""" + + cmd = DebugValues.get_value("bin_zonename") # pylint: disable=E1120 + if cmd is not None: + cmd = [cmd] + else: + cmd = ["/bin/zonename"] + + # if the command doesn't exist then bail. + if not li.path_exists(cmd[0]): + return + + # open a temporary file in text mode for compatible string handling + fout = tempfile.TemporaryFile(mode="w+") + ferrout = tempfile.TemporaryFile(mode="w+") + p = pkg.pkgsubprocess.Popen(cmd, stdout=fout, stderr=ferrout) + p.wait() + if p.returncode != 0: cmd = " ".join(cmd) - raise apx.LinkedImageException( - cmd_output_invalid=(cmd, lines)) + ferrout.seek(0) + errout = "".join(ferrout.readlines()) + ferrout.close() + raise apx.LinkedImageException(cmd_failed=(p.returncode, cmd, errout)) + + # parse the command output + fout.seek(0) + lines = fout.readlines() + if lines: + zonename = lines[0].rstrip() + fout.close() + return zonename + # If /bin/zonename does not return the expected output, + # we raise an exception of LinkedImageException, which + # is handled by _in_gz(). + cmd = " ".join(cmd) + raise apx.LinkedImageException(cmd_output_invalid=(cmd, lines)) -def _zoneadm_list_parse(line, cmd, output): - """Parse zoneadm list -p output. It's possible for zonepath to - contain a ":". If it does it will be escaped to be "\\:". (But note - that if the zonepath contains a "\" it will not be escaped, which - is argubaly a bug.)""" - - # zoneadm list output should never contain a NUL char, so - # temporarily replace any escaped colons with a NUL, split the string - # on any remaining colons, and then switch any NULs back to colons. - tmp_char = "\0" - fields = [ - field.replace(tmp_char, ":") - for field in line.replace(r"\:", tmp_char).split(":") - ] - try: - # Unused variable; pylint: disable=W0612 - z_id, z_name, z_state, z_path, z_uuid, z_brand, z_iptype = \ - fields[:7] - # pylint: enable=W0612 - except ValueError: - raise apx.LinkedImageException( - cmd_output_invalid=(cmd, output)) +def _zoneadm_list_parse(line, cmd, output): + """Parse zoneadm list -p output. It's possible for zonepath to + contain a ":". If it does it will be escaped to be "\\:". (But note + that if the zonepath contains a "\" it will not be escaped, which + is argubaly a bug.)""" + + # zoneadm list output should never contain a NUL char, so + # temporarily replace any escaped colons with a NUL, split the string + # on any remaining colons, and then switch any NULs back to colons. 
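# _zonename() above shells out and takes the first line of stdout as the
# answer, raising if the command fails or prints nothing.  A stdlib-only
# sketch of the same shape, using subprocess.run instead of
# pkg.pkgsubprocess and temporary files; the uname call is just a portable
# substitute for /bin/zonename.
import subprocess

def first_line_of(cmd):
    p = subprocess.run(cmd, capture_output=True, text=True)
    if p.returncode != 0:
        raise RuntimeError("{0} failed: {1}".format(" ".join(cmd), p.stderr))
    lines = p.stdout.splitlines()
    if not lines:
        raise RuntimeError("{0} produced no output".format(" ".join(cmd)))
    return lines[0].rstrip()

print(first_line_of(["uname", "-s"]))
# On illumos/Solaris, first_line_of(["/bin/zonename"]) returns e.g. "global".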
+ tmp_char = "\0" + fields = [ + field.replace(tmp_char, ":") + for field in line.replace(r"\:", tmp_char).split(":") + ] + + try: + # Unused variable; pylint: disable=W0612 + z_id, z_name, z_state, z_path, z_uuid, z_brand, z_iptype = fields[:7] + # pylint: enable=W0612 + except ValueError: + raise apx.LinkedImageException(cmd_output_invalid=(cmd, output)) + + return z_name, z_state, z_path, z_brand - return z_name, z_state, z_path, z_brand def _list_zones(root, path_transform): - """Get the zones associated with the image located at 'root'. We - return a dictionary where the keys are zone names and the values are - tuples containing zone root path and current state. The global zone is - excluded from the results. Solaris10 branded zones are excluded from the - results.""" + """Get the zones associated with the image located at 'root'. We + return a dictionary where the keys are zone names and the values are + tuples containing zone root path and current state. The global zone is + excluded from the results. Solaris10 branded zones are excluded from the + results.""" + + rv = dict() + cmd = DebugValues.get_value("bin_zoneadm") # pylint: disable=E1120 + if cmd is not None: + cmd = [cmd] + else: + cmd = ["/usr/sbin/zoneadm"] + + # if the command doesn't exist then bail. + if not li.path_exists(cmd[0]): + return rv - rv = dict() - cmd = DebugValues.get_value("bin_zoneadm") # pylint: disable=E1120 - if cmd is not None: - cmd = [cmd] - else: - cmd = ["/usr/sbin/zoneadm"] - - # if the command doesn't exist then bail. - if not li.path_exists(cmd[0]): - return rv - - # make sure "root" has a trailing '/' - root = root.rstrip(os.sep) + os.sep - - # create the zoneadm command line - cmd.extend(["-R", str(root), "list", "-cp"]) - - # execute zoneadm and save its output to a file - # open a temporary file in text mode for compatible string handling - fout = tempfile.TemporaryFile(mode="w+") - ferrout = tempfile.TemporaryFile(mode="w+") - p = pkg.pkgsubprocess.Popen(cmd, stdout=fout, stderr=ferrout) - p.wait() - if p.returncode != 0: - cmd = " ".join(cmd) - ferrout.seek(0) - errout = "".join(ferrout.readlines()) - ferrout.close() - raise apx.LinkedImageException( - cmd_failed=(p.returncode, cmd, errout)) - - # parse the command output - fout.seek(0) - output = fout.readlines() - fout.close() - for l in output: - l = l.rstrip() - - z_name, z_state, z_path, z_brand = \ - _zoneadm_list_parse(l, cmd, output) - - # skip brands that we don't care about - # W0511 XXX / FIXME Comments; pylint: disable=W0511 - # XXX: don't hard code brand names, use a brand attribute - # pylint: enable=W0511 - if z_brand not in [ - "lipkg", "solaris", "sn1", "labeled", "sparse", "pkgsrc"]: - continue - - # we don't care about the global zone. - if z_name == "global": - continue - - # append "/root" to zonepath - z_rootpath = os.path.join(z_path, "root") - assert z_rootpath.startswith(root), \ - "zone path '{0}' doesn't begin with '{1}".format( - z_rootpath, root) - - # If there is a current path transform in effect then revert - # the path reported by zoneadm to the original zone path. 
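# _zoneadm_list_parse() above handles "zoneadm list -p" output in which
# literal colons in the zonepath are escaped as "\:".  It swaps the escaped
# form for a NUL byte (which never appears in the output), splits on the
# remaining colons, then restores the literal colons.  Stand-alone demo with
# a fabricated output line.
line = r"1:myzone:running:/zones/odd\:path:uuid-1234:lipkg:excl"

tmp_char = "\0"
fields = [f.replace(tmp_char, ":")
          for f in line.replace(r"\:", tmp_char).split(":")]
print(fields[1], fields[3])     # myzone /zones/odd:path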
- if li.path_transform_applied(z_rootpath, path_transform): - z_rootpath = li.path_transform_revert(z_rootpath, - path_transform) - - # we only care about zones that have been installed - if z_state not in zone_installed_states: - continue - - rv[z_name] = (z_rootpath, z_state) + # make sure "root" has a trailing '/' + root = root.rstrip(os.sep) + os.sep + + # create the zoneadm command line + cmd.extend(["-R", str(root), "list", "-cp"]) + + # execute zoneadm and save its output to a file + # open a temporary file in text mode for compatible string handling + fout = tempfile.TemporaryFile(mode="w+") + ferrout = tempfile.TemporaryFile(mode="w+") + p = pkg.pkgsubprocess.Popen(cmd, stdout=fout, stderr=ferrout) + p.wait() + if p.returncode != 0: + cmd = " ".join(cmd) + ferrout.seek(0) + errout = "".join(ferrout.readlines()) + ferrout.close() + raise apx.LinkedImageException(cmd_failed=(p.returncode, cmd, errout)) + + # parse the command output + fout.seek(0) + output = fout.readlines() + fout.close() + for l in output: + l = l.rstrip() + + z_name, z_state, z_path, z_brand = _zoneadm_list_parse(l, cmd, output) + + # skip brands that we don't care about + # W0511 XXX / FIXME Comments; pylint: disable=W0511 + # XXX: don't hard code brand names, use a brand attribute + # pylint: enable=W0511 + if z_brand not in [ + "lipkg", + "solaris", + "sn1", + "labeled", + "sparse", + "pkgsrc", + ]: + continue + + # we don't care about the global zone. + if z_name == "global": + continue + + # append "/root" to zonepath + z_rootpath = os.path.join(z_path, "root") + assert z_rootpath.startswith( + root + ), "zone path '{0}' doesn't begin with '{1}".format(z_rootpath, root) + + # If there is a current path transform in effect then revert + # the path reported by zoneadm to the original zone path. + if li.path_transform_applied(z_rootpath, path_transform): + z_rootpath = li.path_transform_revert(z_rootpath, path_transform) + + # we only care about zones that have been installed + if z_state not in zone_installed_states: + continue + + rv[z_name] = (z_rootpath, z_state) + + return rv - return rv def list_running_zones(): - """Return dictionary with currently running zones of the system in the - following form: - { zone_name : zone_path, ... } - """ + """Return dictionary with currently running zones of the system in the + following form: + { zone_name : zone_path, ... } + """ + + zdict = _list_zones("/", li.PATH_TRANSFORM_NONE) + rzdict = {} + for z_name, (z_path, z_state) in six.iteritems(zdict): + if z_state == ZONE_STATE_STR_RUNNING: + rzdict[z_name] = z_path - zdict = _list_zones("/", li.PATH_TRANSFORM_NONE) - rzdict = {} - for z_name, (z_path, z_state) in six.iteritems(zdict): - if z_state == ZONE_STATE_STR_RUNNING: - rzdict[z_name] = z_path + return rzdict - return rzdict # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/options.py b/src/modules/client/options.py index 797e45599..3adf9188d 100644 --- a/src/modules/client/options.py +++ b/src/modules/client/options.py @@ -31,11 +31,16 @@ from pkg.client.api_errors import InvalidOptionError, LinkedImageException from pkg.client import global_settings -from pkg.client.imageconfig import DEFAULT_RECURSE, DEFAULT_CONCURRENCY, \ - TEMP_BE_ACTIVATION +from pkg.client.imageconfig import ( + DEFAULT_RECURSE, + DEFAULT_CONCURRENCY, + TEMP_BE_ACTIVATION, +) _orig_cwd = None +# fmt: off + # List of available options for common option processing. 
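# list_running_zones() above is just a filter over the dictionary returned by
# _list_zones(), keeping entries whose state is "running".  The same filter
# applied to a hand-written sample dictionary.
zdict = {"z1": ("/zones/z1/root", "running"),
         "z2": ("/zones/z2/root", "installed")}

running = {name: path for name, (path, state) in zdict.items()
           if state == "running"}
print(running)          # {'z1': '/zones/z1/root'}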
ACCEPT = "accept" ALLOW_RELINK = "allow_relink" @@ -135,777 +140,879 @@ INFO_LOCAL = "info_local" INFO_REMOTE = "info_remote" +# fmt: on + + def opts_table_cb_info(op, api_inst, opts, opts_new): - opts_new[ORIGINS] = set() - for e in opts[ORIGINS]: - opts_new[ORIGINS].add(misc.parse_uri(e, - cwd=_orig_cwd)) - if opts[ORIGINS]: - opts_new[INFO_REMOTE] = True - if opts[QUIET]: - global_settings.client_output_quiet = True - if not opts_new[INFO_LOCAL] and not opts_new[INFO_REMOTE]: - opts_new[INFO_LOCAL] = True - elif opts_new[INFO_LOCAL] and opts_new[INFO_REMOTE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [INFO_LOCAL, INFO_REMOTE]) + opts_new[ORIGINS] = set() + for e in opts[ORIGINS]: + opts_new[ORIGINS].add(misc.parse_uri(e, cwd=_orig_cwd)) + if opts[ORIGINS]: + opts_new[INFO_REMOTE] = True + if opts[QUIET]: + global_settings.client_output_quiet = True + if not opts_new[INFO_LOCAL] and not opts_new[INFO_REMOTE]: + opts_new[INFO_LOCAL] = True + elif opts_new[INFO_LOCAL] and opts_new[INFO_REMOTE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [INFO_LOCAL, INFO_REMOTE] + ) + def __parse_set_props(args): - """"Parse set property options that were specified on the command - line into a dictionary. Make sure duplicate properties were not - specified.""" - - set_props = dict() - for pv in args: - try: - p, v = pv.split("=", 1) - except ValueError: - raise InvalidOptionError(msg=_("properties to be set " - "must be of the form '='. This is " - "what was given: {0}").format(pv)) - - if p in set_props: - raise InvalidOptionError(msg=_("a property may only " - "be set once in a command. {0} was set twice" - ).format(p)) - set_props[p] = v - - return set_props + """ "Parse set property options that were specified on the command + line into a dictionary. Make sure duplicate properties were not + specified.""" + + set_props = dict() + for pv in args: + try: + p, v = pv.split("=", 1) + except ValueError: + raise InvalidOptionError( + msg=_( + "properties to be set " + "must be of the form '='. This is " + "what was given: {0}" + ).format(pv) + ) + + if p in set_props: + raise InvalidOptionError( + msg=_( + "a property may only " + "be set once in a command. {0} was set twice" + ).format(p) + ) + set_props[p] = v + + return set_props + def __parse_prop_values(args, add=True): - """"Parse add or remove property values options that were specified - on the command line into a dictionary. Make sure duplicate properties - were not specified.""" + """ "Parse add or remove property values options that were specified + on the command line into a dictionary. Make sure duplicate properties + were not specified.""" - props_values = dict() - if add: - add_txt = "added" - else: - add_txt = "removed" + props_values = dict() + if add: + add_txt = "added" + else: + add_txt = "removed" + + for pv in args: + try: + p, v = pv.split("=", 1) + except ValueError: + raise InvalidOptionError( + msg=_( + "property values to be " + "{add} must be of the form '='. " + "This is what was given: {key}" + ).format(add=add_txt, key=pv) + ) - for pv in args: - try: - p, v = pv.split("=", 1) - except ValueError: - raise InvalidOptionError(msg=_("property values to be " - "{add} must be of the form '='. 
" - "This is what was given: {key}").format( - add=add_txt, key=pv)) + props_values.setdefault(p, []) + props_values[p].append(v) - props_values.setdefault(p, []) - props_values[p].append(v) + return props_values - return props_values def opts_table_cb_output_format(op, api_inst, opts, opts_new): - if opts[OUTPUT_FORMAT] == None: - opts_new[OUTPUT_FORMAT] = "default" + if opts[OUTPUT_FORMAT] == None: + opts_new[OUTPUT_FORMAT] = "default" + + if QUIET in opts and opts[QUIET] and opts_new[OUTPUT_FORMAT] != "default": + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [QUIET, OUTPUT_FORMAT] + ) - if (QUIET in opts and opts[QUIET] and - opts_new[OUTPUT_FORMAT] != 'default'): - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [QUIET, OUTPUT_FORMAT]) def opts_table_cb_pub_props(op, api_inst, opts, opts_new): - opts_new[SET_PROPS] = __parse_set_props(opts[SET_PROPS]) - opts_new[ADD_PROP_VALUES] = __parse_prop_values(opts[ADD_PROP_VALUES]) - opts_new[REMOVE_PROP_VALUES] = __parse_prop_values( - opts[REMOVE_PROP_VALUES], add=False) - opts_new[UNSET_PROPS] = set(opts[UNSET_PROPS]) + opts_new[SET_PROPS] = __parse_set_props(opts[SET_PROPS]) + opts_new[ADD_PROP_VALUES] = __parse_prop_values(opts[ADD_PROP_VALUES]) + opts_new[REMOVE_PROP_VALUES] = __parse_prop_values( + opts[REMOVE_PROP_VALUES], add=False + ) + opts_new[UNSET_PROPS] = set(opts[UNSET_PROPS]) + def opts_table_cb_pub_search(op, api_inst, opts, opts_new): - if opts[SEARCH_BEFORE] and opts[SEARCH_AFTER]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [SEARCH_BEFORE, SEARCH_AFTER]) + if opts[SEARCH_BEFORE] and opts[SEARCH_AFTER]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [SEARCH_BEFORE, SEARCH_AFTER] + ) - if opts[SEARCH_BEFORE] and opts[SEARCH_FIRST]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [SEARCH_BEFORE, SEARCH_FIRST]) + if opts[SEARCH_BEFORE] and opts[SEARCH_FIRST]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [SEARCH_BEFORE, SEARCH_FIRST] + ) - if opts[SEARCH_AFTER] and opts[SEARCH_FIRST]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [SEARCH_AFTER, SEARCH_FIRST]) + if opts[SEARCH_AFTER] and opts[SEARCH_FIRST]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [SEARCH_AFTER, SEARCH_FIRST] + ) -def opts_table_cb_pub_opts(op, api_inst, opts, opts_new): - del opts_new[PUB_DISABLE] - del opts_new[PUB_ENABLE] - del opts_new[PUB_STICKY] - del opts_new[PUB_NON_STICKY] - - if opts[PUB_DISABLE] and opts[PUB_ENABLE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [PUB_DISABLE, PUB_ENABLE]) - - if opts[PUB_STICKY] and opts[PUB_NON_STICKY]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [PUB_STICKY, PUB_NON_STICKY]) - - opts_new[PUB_DISABLE] = None - if opts[PUB_DISABLE]: - opts_new[PUB_DISABLE] = True - - if opts[PUB_ENABLE]: - opts_new[PUB_DISABLE] = False - - opts_new[PUB_STICKY] = None - if opts[PUB_STICKY]: - opts_new[PUB_STICKY] = True - - if opts[PUB_NON_STICKY]: - opts_new[PUB_STICKY] = False - - if opts[ORIGIN_URI] and opts[ADD_ORIGINS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [ORIGIN_URI, ADD_ORIGINS]) - - if opts[ORIGIN_URI] and opts[REMOVE_ORIGINS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [ORIGIN_URI, REMOVE_ORIGINS]) - - if opts[REPO_URI] and opts[ADD_ORIGINS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [REPO_URI, ADD_ORIGINS]) - if opts[REPO_URI] and opts[ADD_MIRRORS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [REPO_URI, ADD_MIRRORS]) - if opts[REPO_URI] 
and opts[REMOVE_ORIGINS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [REPO_URI, REMOVE_ORIGINS]) - if opts[REPO_URI] and opts[REMOVE_MIRRORS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [REPO_URI, REMOVE_MIRRORS]) - if opts[REPO_URI] and opts[PUB_DISABLE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [REPO_URI, PUB_DISABLE]) - if opts[REPO_URI] and opts[PUB_ENABLE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [REPO_URI, PUB_ENABLE]) - if opts[REPO_URI] and not opts[REFRESH_ALLOWED]: - raise InvalidOptionError(InvalidOptionError.REQUIRED, - [REPO_URI, REFRESH_ALLOWED]) - if opts[REPO_URI] and opts[RESET_UUID]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [REPO_URI, RESET_UUID]) - - if opts[PROXY_URI] and not (opts[ADD_ORIGINS] or opts[ADD_MIRRORS] - or opts[REPO_URI] or opts[REMOVE_ORIGINS] or opts[REMOVE_MIRRORS]): - raise InvalidOptionError(InvalidOptionError.REQUIRED_ANY, - [PROXY_URI, ADD_ORIGINS, ADD_MIRRORS, REMOVE_ORIGINS, - REMOVE_MIRRORS, REPO_URI]) - - opts_new[ADD_ORIGINS] = set() - opts_new[REMOVE_ORIGINS] = set() - opts_new[ADD_MIRRORS] = set() - opts_new[REMOVE_MIRRORS] = set() - opts_new[ENABLE_ORIGINS] = set() - opts_new[DISABLE_ORIGINS] = set() - for e in opts[ADD_ORIGINS]: - if e == "*": - if not (opts[PUB_DISABLE] or opts[PUB_ENABLE]): - raise InvalidOptionError(InvalidOptionError.XOR, - [PUB_ENABLE, PUB_DISABLE]) - # Allow wildcard to support an easy, scriptable - # way of enabling all existing entries. - if opts[PUB_DISABLE]: - opts_new[DISABLE_ORIGINS].add("*") - if opts[PUB_ENABLE]: - opts_new[ENABLE_ORIGINS].add("*") - else: - opts_new[ADD_ORIGINS].add(misc.parse_uri(e, - cwd=_orig_cwd)) - - # If enable/disable is specified and "*" is not present, then assign - # origins collected to be added into disable/enable set as well. - if opts[PUB_DISABLE]: - if "*" not in opts_new[DISABLE_ORIGINS]: - opts_new[DISABLE_ORIGINS] = opts_new[ADD_ORIGINS] - - if opts[PUB_ENABLE]: - if "*" not in opts_new[ENABLE_ORIGINS]: - opts_new[ENABLE_ORIGINS] = opts_new[ADD_ORIGINS] - - for e in opts[REMOVE_ORIGINS]: - if e == "*": - # Allow wildcard to support an easy, scriptable - # way of removing all existing entries. - opts_new[REMOVE_ORIGINS].add("*") - else: - opts_new[REMOVE_ORIGINS].add(misc.parse_uri(e, - cwd=_orig_cwd)) - - for e in opts[ADD_MIRRORS]: - opts_new[ADD_MIRRORS].add(misc.parse_uri(e, cwd=_orig_cwd)) - for e in opts[REMOVE_MIRRORS]: - if e == "*": - # Allow wildcard to support an easy, scriptable - # way of removing all existing entries. 
- opts_new[REMOVE_MIRRORS].add("*") - else: - opts_new[REMOVE_MIRRORS].add(misc.parse_uri(e, - cwd=_orig_cwd)) - - if opts[REPO_URI]: - opts_new[REPO_URI] = misc.parse_uri(opts[REPO_URI], - cwd=_orig_cwd) - -def opts_table_cb_beopts(op, api_inst, opts, opts_new): - # synthesize require_new_be and deny_new_be into new_be - del opts_new[REQUIRE_NEW_BE] - del opts_new[DENY_NEW_BE] - opts_new[NEW_BE] = None - - if (opts[BE_NAME] or opts[REQUIRE_NEW_BE]) and opts[DENY_NEW_BE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [REQUIRE_NEW_BE, DENY_NEW_BE]) - - # update BE_ACTIVATE based on BE_TEMP_ACTIVATE - if opts[BE_TEMP_ACTIVATE] and not opts[BE_ACTIVATE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [BE_ACTIVATE, BE_TEMP_ACTIVATE]) - if opts[BE_ACTIVATE] and (opts[BE_TEMP_ACTIVATE] or - api_inst.img.get_property(TEMP_BE_ACTIVATION)): - opts_new[BE_ACTIVATE] = 'bootnext' - del opts_new[BE_TEMP_ACTIVATE] - - # create a new key called BACKUP_BE in the options array - if opts[REQUIRE_NEW_BE] or opts[BE_NAME]: - opts_new[NEW_BE] = True - if opts[DENY_NEW_BE]: - opts_new[NEW_BE] = False - - # synthesize require_backup_be and no_backup_be into backup_be - del opts_new[REQUIRE_BACKUP_BE] - del opts_new[NO_BACKUP_BE] - opts_new[BACKUP_BE] = None - - if (opts[REQUIRE_BACKUP_BE] or opts[BACKUP_BE_NAME]) and \ - opts[NO_BACKUP_BE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [REQUIRE_BACKUP_BE, NO_BACKUP_BE]) - - if (opts[REQUIRE_BACKUP_BE] or opts[BACKUP_BE_NAME]) and \ - (opts[REQUIRE_NEW_BE] or opts[BE_NAME]): - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [REQUIRE_BACKUP_BE, REQUIRE_NEW_BE]) - - # create a new key called BACKUP_BE in the options array - if opts[REQUIRE_BACKUP_BE] or opts[BACKUP_BE_NAME]: - opts_new[BACKUP_BE] = True - if opts[NO_BACKUP_BE]: - opts_new[BACKUP_BE] = False +def opts_table_cb_pub_opts(op, api_inst, opts, opts_new): + del opts_new[PUB_DISABLE] + del opts_new[PUB_ENABLE] + del opts_new[PUB_STICKY] + del opts_new[PUB_NON_STICKY] + + if opts[PUB_DISABLE] and opts[PUB_ENABLE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [PUB_DISABLE, PUB_ENABLE] + ) + + if opts[PUB_STICKY] and opts[PUB_NON_STICKY]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [PUB_STICKY, PUB_NON_STICKY] + ) + + opts_new[PUB_DISABLE] = None + if opts[PUB_DISABLE]: + opts_new[PUB_DISABLE] = True + + if opts[PUB_ENABLE]: + opts_new[PUB_DISABLE] = False + + opts_new[PUB_STICKY] = None + if opts[PUB_STICKY]: + opts_new[PUB_STICKY] = True + + if opts[PUB_NON_STICKY]: + opts_new[PUB_STICKY] = False + + if opts[ORIGIN_URI] and opts[ADD_ORIGINS]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [ORIGIN_URI, ADD_ORIGINS] + ) + + if opts[ORIGIN_URI] and opts[REMOVE_ORIGINS]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [ORIGIN_URI, REMOVE_ORIGINS] + ) + + if opts[REPO_URI] and opts[ADD_ORIGINS]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [REPO_URI, ADD_ORIGINS] + ) + if opts[REPO_URI] and opts[ADD_MIRRORS]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [REPO_URI, ADD_MIRRORS] + ) + if opts[REPO_URI] and opts[REMOVE_ORIGINS]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [REPO_URI, REMOVE_ORIGINS] + ) + if opts[REPO_URI] and opts[REMOVE_MIRRORS]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [REPO_URI, REMOVE_MIRRORS] + ) + if opts[REPO_URI] and opts[PUB_DISABLE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [REPO_URI, PUB_DISABLE] + ) + if 
opts[REPO_URI] and opts[PUB_ENABLE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [REPO_URI, PUB_ENABLE] + ) + if opts[REPO_URI] and not opts[REFRESH_ALLOWED]: + raise InvalidOptionError( + InvalidOptionError.REQUIRED, [REPO_URI, REFRESH_ALLOWED] + ) + if opts[REPO_URI] and opts[RESET_UUID]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [REPO_URI, RESET_UUID] + ) + + if opts[PROXY_URI] and not ( + opts[ADD_ORIGINS] + or opts[ADD_MIRRORS] + or opts[REPO_URI] + or opts[REMOVE_ORIGINS] + or opts[REMOVE_MIRRORS] + ): + raise InvalidOptionError( + InvalidOptionError.REQUIRED_ANY, + [ + PROXY_URI, + ADD_ORIGINS, + ADD_MIRRORS, + REMOVE_ORIGINS, + REMOVE_MIRRORS, + REPO_URI, + ], + ) + + opts_new[ADD_ORIGINS] = set() + opts_new[REMOVE_ORIGINS] = set() + opts_new[ADD_MIRRORS] = set() + opts_new[REMOVE_MIRRORS] = set() + opts_new[ENABLE_ORIGINS] = set() + opts_new[DISABLE_ORIGINS] = set() + for e in opts[ADD_ORIGINS]: + if e == "*": + if not (opts[PUB_DISABLE] or opts[PUB_ENABLE]): + raise InvalidOptionError( + InvalidOptionError.XOR, [PUB_ENABLE, PUB_DISABLE] + ) + # Allow wildcard to support an easy, scriptable + # way of enabling all existing entries. + if opts[PUB_DISABLE]: + opts_new[DISABLE_ORIGINS].add("*") + if opts[PUB_ENABLE]: + opts_new[ENABLE_ORIGINS].add("*") + else: + opts_new[ADD_ORIGINS].add(misc.parse_uri(e, cwd=_orig_cwd)) + + # If enable/disable is specified and "*" is not present, then assign + # origins collected to be added into disable/enable set as well. + if opts[PUB_DISABLE]: + if "*" not in opts_new[DISABLE_ORIGINS]: + opts_new[DISABLE_ORIGINS] = opts_new[ADD_ORIGINS] + + if opts[PUB_ENABLE]: + if "*" not in opts_new[ENABLE_ORIGINS]: + opts_new[ENABLE_ORIGINS] = opts_new[ADD_ORIGINS] + + for e in opts[REMOVE_ORIGINS]: + if e == "*": + # Allow wildcard to support an easy, scriptable + # way of removing all existing entries. + opts_new[REMOVE_ORIGINS].add("*") + else: + opts_new[REMOVE_ORIGINS].add(misc.parse_uri(e, cwd=_orig_cwd)) + + for e in opts[ADD_MIRRORS]: + opts_new[ADD_MIRRORS].add(misc.parse_uri(e, cwd=_orig_cwd)) + for e in opts[REMOVE_MIRRORS]: + if e == "*": + # Allow wildcard to support an easy, scriptable + # way of removing all existing entries. 
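# In opts_table_cb_pub_opts() above, "--add-origin *" combined with
# enable/disable keeps the literal wildcard in the enable/disable set, while
# concrete URIs are collected normally; if no wildcard was given, the newly
# added origins double as the enable/disable targets.  A simplified sketch of
# the disable path only, with stand-in URIs and no URI parsing.
def split_origins(add_origins, disable):
    added, disabled = set(), set()
    for o in add_origins:
        if o == "*":
            if disable:
                disabled.add("*")
        else:
            added.add(o)
    if disable and "*" not in disabled:
        disabled = set(added)           # disable exactly what was named
    return added, disabled

print(split_origins(["http://pkg.example.org/"], disable=True))
# -> ({'http://pkg.example.org/'}, {'http://pkg.example.org/'})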
+ opts_new[REMOVE_MIRRORS].add("*") + else: + opts_new[REMOVE_MIRRORS].add(misc.parse_uri(e, cwd=_orig_cwd)) -def opts_table_cb_li_ignore(op, api_inst, opts, opts_new): + if opts[REPO_URI]: + opts_new[REPO_URI] = misc.parse_uri(opts[REPO_URI], cwd=_orig_cwd) - # synthesize li_ignore_all and li_ignore_list into li_ignore - del opts_new[LI_IGNORE_ALL] - del opts_new[LI_IGNORE_LIST] - opts_new[LI_IGNORE] = None - # check if there's nothing to ignore - if not opts[LI_IGNORE_ALL] and not opts[LI_IGNORE_LIST]: - return +def opts_table_cb_beopts(op, api_inst, opts, opts_new): + # synthesize require_new_be and deny_new_be into new_be + del opts_new[REQUIRE_NEW_BE] + del opts_new[DENY_NEW_BE] + opts_new[NEW_BE] = None + + if (opts[BE_NAME] or opts[REQUIRE_NEW_BE]) and opts[DENY_NEW_BE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [REQUIRE_NEW_BE, DENY_NEW_BE] + ) + + # update BE_ACTIVATE based on BE_TEMP_ACTIVATE + if opts[BE_TEMP_ACTIVATE] and not opts[BE_ACTIVATE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [BE_ACTIVATE, BE_TEMP_ACTIVATE] + ) + if opts[BE_ACTIVATE] and ( + opts[BE_TEMP_ACTIVATE] or api_inst.img.get_property(TEMP_BE_ACTIVATION) + ): + opts_new[BE_ACTIVATE] = "bootnext" + del opts_new[BE_TEMP_ACTIVATE] + + # create a new key called BACKUP_BE in the options array + if opts[REQUIRE_NEW_BE] or opts[BE_NAME]: + opts_new[NEW_BE] = True + if opts[DENY_NEW_BE]: + opts_new[NEW_BE] = False + + # synthesize require_backup_be and no_backup_be into backup_be + del opts_new[REQUIRE_BACKUP_BE] + del opts_new[NO_BACKUP_BE] + opts_new[BACKUP_BE] = None + + if (opts[REQUIRE_BACKUP_BE] or opts[BACKUP_BE_NAME]) and opts[NO_BACKUP_BE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [REQUIRE_BACKUP_BE, NO_BACKUP_BE] + ) + + if (opts[REQUIRE_BACKUP_BE] or opts[BACKUP_BE_NAME]) and ( + opts[REQUIRE_NEW_BE] or opts[BE_NAME] + ): + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [REQUIRE_BACKUP_BE, REQUIRE_NEW_BE] + ) + + # create a new key called BACKUP_BE in the options array + if opts[REQUIRE_BACKUP_BE] or opts[BACKUP_BE_NAME]: + opts_new[BACKUP_BE] = True + if opts[NO_BACKUP_BE]: + opts_new[BACKUP_BE] = False - if opts[LI_IGNORE_ALL]: - # can't ignore all and specific images - if opts[LI_IGNORE_LIST]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [LI_IGNORE_ALL, LI_IGNORE_LIST]) - - # can't ignore all and target anything. - if LI_TARGET_ALL in opts and opts[LI_TARGET_ALL]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [LI_IGNORE_ALL, LI_TARGET_ALL]) - if LI_TARGET_LIST in opts and opts[LI_TARGET_LIST]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [LI_IGNORE_ALL, LI_TARGET_LIST]) - if LI_NAME in opts and opts[LI_NAME]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [LI_IGNORE_ALL, LI_NAME]) - opts_new[LI_IGNORE] = [] - return - - assert opts[LI_IGNORE_LIST] - - # it doesn't make sense to specify images to ignore if the - # user is already specifying images to operate on. 
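
The new opts_table_cb_pub_opts above follows the pattern used throughout these callbacks: paired publisher choices such as enable/disable and sticky/non-sticky are collapsed into a single synthesized key, and asking for both at once raises InvalidOptionError with the INCOMPAT reason. A minimal standalone sketch of that collapse (the helper name and the plain ValueError are illustrative stand-ins, not the pkg.client API):

    def collapse_flags(positive, negative):
        """Return True if only 'positive' is set, False if only 'negative'
        is set, None if neither; both at once is an error."""
        if positive and negative:
            raise ValueError("options are incompatible")
        if positive:
            return True
        if negative:
            return False
        return None

    # e.g. synthesize a single "disabled" value from --disable / --enable
    print(collapse_flags(True, False))   # True  (publisher will be disabled)
    print(collapse_flags(False, False))  # None  (leave current state alone)
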
+def opts_table_cb_li_ignore(op, api_inst, opts, opts_new): + # synthesize li_ignore_all and li_ignore_list into li_ignore + del opts_new[LI_IGNORE_ALL] + del opts_new[LI_IGNORE_LIST] + opts_new[LI_IGNORE] = None + + # check if there's nothing to ignore + if not opts[LI_IGNORE_ALL] and not opts[LI_IGNORE_LIST]: + return + + if opts[LI_IGNORE_ALL]: + # can't ignore all and specific images + if opts[LI_IGNORE_LIST]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [LI_IGNORE_ALL, LI_IGNORE_LIST] + ) + + # can't ignore all and target anything. if LI_TARGET_ALL in opts and opts[LI_TARGET_ALL]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [LI_IGNORE_LIST, LI_TARGET_ALL]) + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [LI_IGNORE_ALL, LI_TARGET_ALL] + ) if LI_TARGET_LIST in opts and opts[LI_TARGET_LIST]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [LI_IGNORE_LIST, LI_TARGET_LIST]) + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [LI_IGNORE_ALL, LI_TARGET_LIST] + ) if LI_NAME in opts and opts[LI_NAME]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [LI_IGNORE_LIST, LI_NAME]) + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [LI_IGNORE_ALL, LI_NAME] + ) + opts_new[LI_IGNORE] = [] + return + + assert opts[LI_IGNORE_LIST] + + # it doesn't make sense to specify images to ignore if the + # user is already specifying images to operate on. + if LI_TARGET_ALL in opts and opts[LI_TARGET_ALL]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [LI_IGNORE_LIST, LI_TARGET_ALL] + ) + if LI_TARGET_LIST in opts and opts[LI_TARGET_LIST]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [LI_IGNORE_LIST, LI_TARGET_LIST] + ) + if LI_NAME in opts and opts[LI_NAME]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [LI_IGNORE_LIST, LI_NAME] + ) + + li_ignore = [] + for li_name in opts[LI_IGNORE_LIST]: + # check for repeats + if li_name in li_ignore: + raise InvalidOptionError( + InvalidOptionError.ARG_REPEAT, [li_name, LI_IGNORE_LIST] + ) + # add to ignore list + li_ignore.append(li_name) + + opts_new[LI_IGNORE] = api_inst.parse_linked_name_list(li_ignore) - li_ignore = [] - for li_name in opts[LI_IGNORE_LIST]: - # check for repeats - if li_name in li_ignore: - raise InvalidOptionError( - InvalidOptionError.ARG_REPEAT, [li_name, - LI_IGNORE_LIST]) - # add to ignore list - li_ignore.append(li_name) - - opts_new[LI_IGNORE] = api_inst.parse_linked_name_list(li_ignore) def opts_table_cb_li_no_psync(op, api_inst, opts, opts_new): - # if a target child linked image was specified, the no-parent-sync - # option doesn't make sense since we know that both the parent and - # child image are accessible + # if a target child linked image was specified, the no-parent-sync + # option doesn't make sense since we know that both the parent and + # child image are accessible - if LI_TARGET_ALL not in opts: - # we don't accept linked image target options - assert LI_TARGET_LIST not in opts - return + if LI_TARGET_ALL not in opts: + # we don't accept linked image target options + assert LI_TARGET_LIST not in opts + return - if opts[LI_TARGET_ALL] and not opts[LI_PARENT_SYNC]: - raise InvalidOptionError(InvalidOptionError.REQUIRED, - [LI_TARGET_ALL, LI_PARENT_SYNC]) + if opts[LI_TARGET_ALL] and not opts[LI_PARENT_SYNC]: + raise InvalidOptionError( + InvalidOptionError.REQUIRED, [LI_TARGET_ALL, LI_PARENT_SYNC] + ) - if opts[LI_TARGET_LIST] and not opts[LI_PARENT_SYNC]: - raise InvalidOptionError(InvalidOptionError.REQUIRED, - 
[LI_TARGET_LIST, LI_PARENT_SYNC]) + if opts[LI_TARGET_LIST] and not opts[LI_PARENT_SYNC]: + raise InvalidOptionError( + InvalidOptionError.REQUIRED, [LI_TARGET_LIST, LI_PARENT_SYNC] + ) -def opts_table_cb_unpackaged(op, api_inst, opts, opts_new): - # Check whether unpackaged and unpackaged_only options are used - # together. - if opts[UNPACKAGED] and opts[UNPACKAGED_ONLY]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [UNPACKAGED, UNPACKAGED_ONLY]) +def opts_table_cb_unpackaged(op, api_inst, opts, opts_new): + # Check whether unpackaged and unpackaged_only options are used + # together. -def opts_table_cb_path_no_unpackaged(op, api_inst, opts, opts_new): - # Check whether path options is used with either unpackaged - # or unpackaged_only options. + if opts[UNPACKAGED] and opts[UNPACKAGED_ONLY]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [UNPACKAGED, UNPACKAGED_ONLY] + ) - if opts[VERIFY_PATHS] and opts[UNPACKAGED]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [VERIFY_PATHS, UNPACKAGED]) - if opts[VERIFY_PATHS] and opts[UNPACKAGED_ONLY]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [VERIFY_PATHS, UNPACKAGED_ONLY]) +def opts_table_cb_path_no_unpackaged(op, api_inst, opts, opts_new): + # Check whether path options is used with either unpackaged - # or unpackaged_only options. + # or unpackaged_only options. -def __parse_linked_props(args): - """"Parse linked image property options that were specified on the - command line into a dictionary. Make sure duplicate properties were - not specified.""" + if opts[VERIFY_PATHS] and opts[UNPACKAGED]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [VERIFY_PATHS, UNPACKAGED] + ) - linked_props = dict() - for pv in args: - try: - p, v = pv.split("=", 1) - except ValueError: - raise InvalidOptionError(msg=_("linked image " - "property arguments must be of the form " - "'<name>=<value>'.")) + if opts[VERIFY_PATHS] and opts[UNPACKAGED_ONLY]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [VERIFY_PATHS, UNPACKAGED_ONLY] + ) - if p not in li.prop_values: - raise InvalidOptionError(msg=_("invalid linked " - "image property: '{0}'.").format(p)) - if p in linked_props: - raise InvalidOptionError(msg=_("linked image " - "property specified multiple times: " - "'{0}'.").format(p)) +def __parse_linked_props(args): + """ "Parse linked image property options that were specified on the + command line into a dictionary. Make sure duplicate properties were + not specified.""" - linked_props[p] = v + linked_props = dict() + for pv in args: + try: + p, v = pv.split("=", 1) + except ValueError: + raise InvalidOptionError( + msg=_( + "linked image " + "property arguments must be of the form " + "'<name>=<value>'." + ) + ) + + if p not in li.prop_values: + raise InvalidOptionError( + msg=_("invalid linked " "image property: '{0}'.").format(p) + ) + + if p in linked_props: + raise InvalidOptionError( + msg=_( + "linked image " + "property specified multiple times: " + "'{0}'."
+ ).format(p) + ) + + linked_props[p] = v + + return linked_props - return linked_props def opts_table_cb_li_props(op, api_inst, opts, opts_new): - """convert linked image prop list into a dictionary""" + """convert linked image prop list into a dictionary""" + + opts_new[LI_PROPS] = __parse_linked_props(opts[LI_PROPS]) - opts_new[LI_PROPS] = __parse_linked_props(opts[LI_PROPS]) def opts_table_cb_li_target(op, api_inst, opts, opts_new): - # figure out which option the user specified - if opts[LI_TARGET_ALL] and opts[LI_TARGET_LIST]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [LI_TARGET_ALL, LI_TARGET_LIST]) - elif opts[LI_TARGET_ALL]: - arg1 = LI_TARGET_ALL - elif opts[LI_TARGET_LIST]: - arg1 = LI_TARGET_LIST - else: - return - - if BE_ACTIVATE in opts and not opts[BE_ACTIVATE]: - raise InvalidOptionError(InvalidOptionError.REQUIRED, - [arg1, BE_ACTIVATE]) - if BE_NAME in opts and opts[BE_NAME]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, BE_NAME]) - if DENY_NEW_BE in opts and opts[DENY_NEW_BE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, DENY_NEW_BE]) - if REQUIRE_NEW_BE in opts and opts[REQUIRE_NEW_BE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, REQUIRE_NEW_BE]) - if REJECT_PATS in opts and opts[REJECT_PATS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, REJECT_PATS]) - if ORIGINS in opts and opts[ORIGINS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, ORIGINS]) - - # validate linked image name - li_target_list = [] - for li_name in opts[LI_TARGET_LIST]: - # check for repeats - if li_name in li_target_list: - raise InvalidOptionError( - InvalidOptionError.ARG_REPEAT, [li_name, - LI_TARGET_LIST]) - # add to ignore list - li_target_list.append(li_name) - - opts_new[LI_TARGET_LIST] = \ - api_inst.parse_linked_name_list(li_target_list) + # figure out which option the user specified + if opts[LI_TARGET_ALL] and opts[LI_TARGET_LIST]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [LI_TARGET_ALL, LI_TARGET_LIST] + ) + elif opts[LI_TARGET_ALL]: + arg1 = LI_TARGET_ALL + elif opts[LI_TARGET_LIST]: + arg1 = LI_TARGET_LIST + else: + return + + if BE_ACTIVATE in opts and not opts[BE_ACTIVATE]: + raise InvalidOptionError( + InvalidOptionError.REQUIRED, [arg1, BE_ACTIVATE] + ) + if BE_NAME in opts and opts[BE_NAME]: + raise InvalidOptionError(InvalidOptionError.INCOMPAT, [arg1, BE_NAME]) + if DENY_NEW_BE in opts and opts[DENY_NEW_BE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [arg1, DENY_NEW_BE] + ) + if REQUIRE_NEW_BE in opts and opts[REQUIRE_NEW_BE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [arg1, REQUIRE_NEW_BE] + ) + if REJECT_PATS in opts and opts[REJECT_PATS]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [arg1, REJECT_PATS] + ) + if ORIGINS in opts and opts[ORIGINS]: + raise InvalidOptionError(InvalidOptionError.INCOMPAT, [arg1, ORIGINS]) + + # validate linked image name + li_target_list = [] + for li_name in opts[LI_TARGET_LIST]: + # check for repeats + if li_name in li_target_list: + raise InvalidOptionError( + InvalidOptionError.ARG_REPEAT, [li_name, LI_TARGET_LIST] + ) + # add to ignore list + li_target_list.append(li_name) + + opts_new[LI_TARGET_LIST] = api_inst.parse_linked_name_list(li_target_list) + def opts_table_cb_li_target1(op, api_inst, opts, opts_new): - # figure out which option the user specified - if opts[LI_NAME]: - arg1 = LI_NAME - else: - return - - if BE_ACTIVATE in opts and not opts[BE_ACTIVATE]: - 
raise InvalidOptionError(InvalidOptionError.REQUIRED, - [arg1, BE_ACTIVATE]) - if BE_NAME in opts and opts[BE_NAME]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, BE_NAME]) - if DENY_NEW_BE in opts and opts[DENY_NEW_BE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, DENY_NEW_BE]) - if REQUIRE_NEW_BE in opts and opts[REQUIRE_NEW_BE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, REQUIRE_NEW_BE]) - if REJECT_PATS in opts and opts[REJECT_PATS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, REJECT_PATS]) - if ORIGINS in opts and opts[ORIGINS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, ORIGINS]) + # figure out which option the user specified + if opts[LI_NAME]: + arg1 = LI_NAME + else: + return + + if BE_ACTIVATE in opts and not opts[BE_ACTIVATE]: + raise InvalidOptionError( + InvalidOptionError.REQUIRED, [arg1, BE_ACTIVATE] + ) + if BE_NAME in opts and opts[BE_NAME]: + raise InvalidOptionError(InvalidOptionError.INCOMPAT, [arg1, BE_NAME]) + if DENY_NEW_BE in opts and opts[DENY_NEW_BE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [arg1, DENY_NEW_BE] + ) + if REQUIRE_NEW_BE in opts and opts[REQUIRE_NEW_BE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [arg1, REQUIRE_NEW_BE] + ) + if REJECT_PATS in opts and opts[REJECT_PATS]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [arg1, REJECT_PATS] + ) + if ORIGINS in opts and opts[ORIGINS]: + raise InvalidOptionError(InvalidOptionError.INCOMPAT, [arg1, ORIGINS]) -def opts_table_cb_li_recurse(op, api_inst, opts, opts_new): - # Just LI_ERECURSE is preserved in the final options and that is - # set to the list of child images selected for recursion. - del opts_new[LI_ERECURSE_INCL] - del opts_new[LI_ERECURSE_EXCL] - del opts_new[LI_ERECURSE_ALL] - del opts_new[LI_ERECURSE_NONE] - - if (op in [ - pkgdefs.PKG_OP_APPLY_HOT_FIX, - pkgdefs.PKG_OP_CHANGE_FACET, - pkgdefs.PKG_OP_CHANGE_VARIANT, - pkgdefs.PKG_OP_SET_MEDIATOR, - pkgdefs.PKG_OP_SET_PUBLISHER, - pkgdefs.PKG_OP_UPDATE, - ] - and api_inst.img.get_property(DEFAULT_RECURSE) - and not opts[LI_ERECURSE_NONE]): - opts[LI_ERECURSE_ALL] = True - - if opts[LI_ERECURSE_EXCL] and not opts[LI_ERECURSE_ALL]: - raise InvalidOptionError(InvalidOptionError.REQUIRED, - [LI_ERECURSE_EXCL, LI_ERECURSE_ALL]) - - if opts[LI_ERECURSE_INCL] and not opts[LI_ERECURSE_ALL]: - raise InvalidOptionError(InvalidOptionError.REQUIRED, - [LI_ERECURSE_INCL, LI_ERECURSE_ALL]) - - if opts[LI_ERECURSE_INCL] and opts[LI_ERECURSE_EXCL]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [LI_ERECURSE_INCL, LI_ERECURSE_EXCL]) - - if not opts[LI_ERECURSE_ALL]: - opts_new[LI_ERECURSE] = None - return - - # Go through all children and check if they are in the recurse list. 
- li_child_targets = [] - li_child_list = set([ - lin - for lin, rel, path in api_inst.list_linked() - if rel == "child" - ]) - - def parse_lin(ulin): - lin = None - try: - lin = api_inst.parse_linked_name(ulin, - allow_unknown=True) - except LinkedImageException as e: - try: - lin = api_inst.parse_linked_name( - "zone:{0}".format(ulin), allow_unknown=True) - except LinkedImageException as e: - pass - if lin is None or lin not in li_child_list: - raise InvalidOptionError(msg= - _("invalid linked image or zone name " - "'{0}'.").format(ulin)) - - return lin - - if opts[LI_ERECURSE_INCL]: - # include list specified - for ulin in opts[LI_ERECURSE_INCL]: - li_child_targets.append(parse_lin(ulin)) - opts_new[LI_ERECURSE] = li_child_targets - else: - # exclude list specified - for ulin in opts[LI_ERECURSE_EXCL]: - li_child_list.remove(parse_lin(ulin)) - opts_new[LI_ERECURSE] = li_child_list +def opts_table_cb_li_recurse(op, api_inst, opts, opts_new): + # Just LI_ERECURSE is preserved in the final options and that is + # set to the list of child images selected for recursion. + del opts_new[LI_ERECURSE_INCL] + del opts_new[LI_ERECURSE_EXCL] + del opts_new[LI_ERECURSE_ALL] + del opts_new[LI_ERECURSE_NONE] + + if ( + op + in [ + pkgdefs.PKG_OP_APPLY_HOT_FIX, + pkgdefs.PKG_OP_CHANGE_FACET, + pkgdefs.PKG_OP_CHANGE_VARIANT, + pkgdefs.PKG_OP_SET_MEDIATOR, + pkgdefs.PKG_OP_SET_PUBLISHER, + pkgdefs.PKG_OP_UPDATE, + ] + and api_inst.img.get_property(DEFAULT_RECURSE) + and not opts[LI_ERECURSE_NONE] + ): + opts[LI_ERECURSE_ALL] = True + + if opts[LI_ERECURSE_EXCL] and not opts[LI_ERECURSE_ALL]: + raise InvalidOptionError( + InvalidOptionError.REQUIRED, [LI_ERECURSE_EXCL, LI_ERECURSE_ALL] + ) + + if opts[LI_ERECURSE_INCL] and not opts[LI_ERECURSE_ALL]: + raise InvalidOptionError( + InvalidOptionError.REQUIRED, [LI_ERECURSE_INCL, LI_ERECURSE_ALL] + ) + + if opts[LI_ERECURSE_INCL] and opts[LI_ERECURSE_EXCL]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [LI_ERECURSE_INCL, LI_ERECURSE_EXCL] + ) + + if not opts[LI_ERECURSE_ALL]: + opts_new[LI_ERECURSE] = None + return + + # Go through all children and check if they are in the recurse list. + li_child_targets = [] + li_child_list = set( + [lin for lin, rel, path in api_inst.list_linked() if rel == "child"] + ) + + def parse_lin(ulin): + lin = None + try: + lin = api_inst.parse_linked_name(ulin, allow_unknown=True) + except LinkedImageException as e: + try: + lin = api_inst.parse_linked_name( + "zone:{0}".format(ulin), allow_unknown=True + ) + except LinkedImageException as e: + pass + if lin is None or lin not in li_child_list: + raise InvalidOptionError( + msg=_("invalid linked image or zone name " "'{0}'.").format( + ulin + ) + ) + + return lin + + if opts[LI_ERECURSE_INCL]: + # include list specified + for ulin in opts[LI_ERECURSE_INCL]: + li_child_targets.append(parse_lin(ulin)) + opts_new[LI_ERECURSE] = li_child_targets + else: + # exclude list specified + for ulin in opts[LI_ERECURSE_EXCL]: + li_child_list.remove(parse_lin(ulin)) + opts_new[LI_ERECURSE] = li_child_list + + # If we use image recursion we need to make sure uninstall and update + # ignore non-existing packages in the parent image. + if opts_new[LI_ERECURSE] and IGNORE_MISSING in opts: + opts_new[IGNORE_MISSING] = True - # If we use image recursion we need to make sure uninstall and update - # ignore non-existing packages in the parent image. 
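
Both versions of opts_table_cb_li_recurse reduce the recursion include/exclude options to one set of child images: an include list must name known children, while an exclude list is subtracted from the full child set. A rough sketch of that selection, with plain strings standing in for the linked-image name objects returned by parse_linked_name:

    def select_children(children, include=None, exclude=None):
        """Pick the child images to recurse into; an unknown name is an
        error, mirroring the 'invalid linked image or zone name' check."""
        children = set(children)

        def check(name):
            if name not in children:
                raise ValueError(f"invalid linked image or zone name '{name}'")
            return name

        if include:
            return {check(n) for n in include}
        if exclude:
            return children - {check(n) for n in exclude}
        return children

    print(select_children({"zone:z1", "zone:z2"}, exclude=["zone:z1"]))  # {'zone:z2'}
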
- if opts_new[LI_ERECURSE] and IGNORE_MISSING in opts: - opts_new[IGNORE_MISSING] = True def opts_table_cb_no_headers_vs_quiet(op, api_inst, opts, opts_new): - # check if we accept the -q option - if QUIET not in opts: - return + # check if we accept the -q option + if QUIET not in opts: + return + + # -q implies -H + if opts[QUIET]: + opts_new[OMIT_HEADERS] = True - # -q implies -H - if opts[QUIET]: - opts_new[OMIT_HEADERS] = True def opts_table_cb_q(op, api_inst, opts, opts_new): - # Be careful not to overwrite global_settings.client_output_quiet - # because it might be set "True" from elsewhere, e.g. in - # opts_table_cb_parsable. - if opts[QUIET] is True: - global_settings.client_output_quiet = True + # Be careful not to overwrite global_settings.client_output_quiet + # because it might be set "True" from elsewhere, e.g. in + # opts_table_cb_parsable. + if opts[QUIET] is True: + global_settings.client_output_quiet = True + def opts_table_cb_v(op, api_inst, opts, opts_new): - global_settings.client_output_verbose = opts[VERBOSE] + global_settings.client_output_verbose = opts[VERBOSE] + def opts_table_cb_nqv(op, api_inst, opts, opts_new): - if opts[VERBOSE] and opts[QUIET]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [VERBOSE, QUIET]) + if opts[VERBOSE] and opts[QUIET]: + raise InvalidOptionError(InvalidOptionError.INCOMPAT, [VERBOSE, QUIET]) + def opts_table_cb_publishers(op, api_inst, opts, opts_new): - publishers = set() - for p in opts[PUBLISHERS]: - publishers.add(p) - opts_new[PUBLISHERS] = publishers + publishers = set() + for p in opts[PUBLISHERS]: + publishers.add(p) + opts_new[PUBLISHERS] = publishers + def opts_table_cb_parsable(op, api_inst, opts, opts_new): - if opts[PARSABLE_VERSION] is not None and opts.get(VERBOSE, False): - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [VERBOSE, PARSABLE_VERSION]) - if opts[PARSABLE_VERSION] is not None and opts.get(OMIT_HEADERS, - False): - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [OMIT_HEADERS, PARSABLE_VERSION]) - if opts[PARSABLE_VERSION] is not None: - try: - opts_new[PARSABLE_VERSION] = int( - opts[PARSABLE_VERSION]) - except ValueError: - raise InvalidOptionError( - options=[PARSABLE_VERSION], - msg=_("integer argument expected")) - - global_settings.client_output_parsable_version = \ - opts_new[PARSABLE_VERSION] - opts_new[QUIET] = True - global_settings.client_output_quiet = True + if opts[PARSABLE_VERSION] is not None and opts.get(VERBOSE, False): + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [VERBOSE, PARSABLE_VERSION] + ) + if opts[PARSABLE_VERSION] is not None and opts.get(OMIT_HEADERS, False): + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [OMIT_HEADERS, PARSABLE_VERSION] + ) + if opts[PARSABLE_VERSION] is not None: + try: + opts_new[PARSABLE_VERSION] = int(opts[PARSABLE_VERSION]) + except ValueError: + raise InvalidOptionError( + options=[PARSABLE_VERSION], msg=_("integer argument expected") + ) + + global_settings.client_output_parsable_version = opts_new[ + PARSABLE_VERSION + ] + opts_new[QUIET] = True + global_settings.client_output_quiet = True + def opts_table_cb_origins(op, api_inst, opts, opts_new): - origins = set() - for o in opts[ORIGINS]: - origins.add(misc.parse_uri(o, cwd=_orig_cwd)) - opts_new[ORIGINS] = origins + origins = set() + for o in opts[ORIGINS]: + origins.add(misc.parse_uri(o, cwd=_orig_cwd)) + opts_new[ORIGINS] = origins + def opts_table_cb_stage(op, api_inst, opts, opts_new): - if opts[STAGE] == None: - opts_new[STAGE] = 
pkgdefs.API_STAGE_DEFAULT - return + if opts[STAGE] == None: + opts_new[STAGE] = pkgdefs.API_STAGE_DEFAULT + return - if opts_new[STAGE] not in pkgdefs.api_stage_values: - raise InvalidOptionError(msg=_("invalid operation stage: " - "'{0}'").format(opts[STAGE])) + if opts_new[STAGE] not in pkgdefs.api_stage_values: + raise InvalidOptionError( + msg=_("invalid operation stage: " "'{0}'").format(opts[STAGE]) + ) -def opts_cb_li_attach(op, api_inst, opts, opts_new): - if opts[ATTACH_PARENT] and opts[ATTACH_CHILD]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [ATTACH_PARENT, ATTACH_CHILD]) - - if not opts[ATTACH_PARENT] and not opts[ATTACH_CHILD]: - raise InvalidOptionError(InvalidOptionError.XOR, - [ATTACH_PARENT, ATTACH_CHILD]) - - if opts[ATTACH_CHILD]: - # if we're attaching a new child then that doesn't affect - # any other children, so ignoring them doesn't make sense. - if opts[LI_IGNORE_ALL]: - raise InvalidOptionError( - InvalidOptionError.INCOMPAT, - [ATTACH_CHILD, LI_IGNORE_ALL]) - if opts[LI_IGNORE_LIST]: - raise InvalidOptionError( - InvalidOptionError.INCOMPAT, - [ATTACH_CHILD, LI_IGNORE_LIST]) -def opts_table_cb_md_only(op, api_inst, opts, opts_new): - # if the user didn't specify linked-md-only we're done - if not opts[LI_MD_ONLY]: - return - - # li_md_only implies no li_pkg_updates - if LI_PKG_UPDATES in opts: - opts_new[LI_PKG_UPDATES] = False - - # - # if li_md_only is false that means we're not updating any packages - # within the current image so there are a ton of options that no - # longer apply to the current operation, and hence are incompatible - # with li_md_only. - # - arg1 = LI_MD_ONLY - if BE_NAME in opts and opts[BE_NAME]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, BE_NAME]) - if DENY_NEW_BE in opts and opts[DENY_NEW_BE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, DENY_NEW_BE]) - if REQUIRE_NEW_BE in opts and opts[REQUIRE_NEW_BE]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, REQUIRE_NEW_BE]) - if LI_PARENT_SYNC in opts and not opts[LI_PARENT_SYNC]: - raise InvalidOptionError(InvalidOptionError.REQUIRED, - [arg1, LI_PARENT_SYNC]) - if REJECT_PATS in opts and opts[REJECT_PATS]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [arg1, REJECT_PATS]) - -list_incompat_options = [ - [ ORIGINS, LIST_UPGRADABLE ], - [ ORIGINS, LIST_REMOVABLE ], +def opts_cb_li_attach(op, api_inst, opts, opts_new): + if opts[ATTACH_PARENT] and opts[ATTACH_CHILD]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [ATTACH_PARENT, ATTACH_CHILD] + ) + + if not opts[ATTACH_PARENT] and not opts[ATTACH_CHILD]: + raise InvalidOptionError( + InvalidOptionError.XOR, [ATTACH_PARENT, ATTACH_CHILD] + ) + + if opts[ATTACH_CHILD]: + # if we're attaching a new child then that doesn't affect + # any other children, so ignoring them doesn't make sense. 
+ if opts[LI_IGNORE_ALL]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [ATTACH_CHILD, LI_IGNORE_ALL] + ) + if opts[LI_IGNORE_LIST]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [ATTACH_CHILD, LI_IGNORE_LIST] + ) - [ LIST_INSTALLED_NEWEST, LIST_NEWEST ], - [ LIST_INSTALLED_NEWEST, LIST_UPGRADABLE], - [ LIST_INSTALLED_NEWEST, LIST_REMOVABLE], - [ LIST_UPGRADABLE, LIST_REMOVABLE], - [ LIST_MANUAL, LIST_NOT_MANUAL], +def opts_table_cb_md_only(op, api_inst, opts, opts_new): + # if the user didn't specify linked-md-only we're done + if not opts[LI_MD_ONLY]: + return + + # li_md_only implies no li_pkg_updates + if LI_PKG_UPDATES in opts: + opts_new[LI_PKG_UPDATES] = False + + # + # if li_md_only is false that means we're not updating any packages + # within the current image so there are a ton of options that no + # longer apply to the current operation, and hence are incompatible + # with li_md_only. + # + arg1 = LI_MD_ONLY + if BE_NAME in opts and opts[BE_NAME]: + raise InvalidOptionError(InvalidOptionError.INCOMPAT, [arg1, BE_NAME]) + if DENY_NEW_BE in opts and opts[DENY_NEW_BE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [arg1, DENY_NEW_BE] + ) + if REQUIRE_NEW_BE in opts and opts[REQUIRE_NEW_BE]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [arg1, REQUIRE_NEW_BE] + ) + if LI_PARENT_SYNC in opts and not opts[LI_PARENT_SYNC]: + raise InvalidOptionError( + InvalidOptionError.REQUIRED, [arg1, LI_PARENT_SYNC] + ) + if REJECT_PATS in opts and opts[REJECT_PATS]: + raise InvalidOptionError( + InvalidOptionError.INCOMPAT, [arg1, REJECT_PATS] + ) - [ LIST_INSTALLABLE, LIST_MANUAL ], - [ LIST_INSTALLABLE, LIST_NOT_MANUAL ], - [ LIST_INSTALLABLE, LIST_REMOVABLE ], - [ LIST_INSTALLABLE, LIST_UPGRADABLE ], - [ SUMMARY, VERBOSE], - [ QUIET, VERBOSE], +list_incompat_options = [ + [ORIGINS, LIST_UPGRADABLE], + [ORIGINS, LIST_REMOVABLE], + [LIST_INSTALLED_NEWEST, LIST_NEWEST], + [LIST_INSTALLED_NEWEST, LIST_UPGRADABLE], + [LIST_INSTALLED_NEWEST, LIST_REMOVABLE], + [LIST_UPGRADABLE, LIST_REMOVABLE], + [LIST_MANUAL, LIST_NOT_MANUAL], + [LIST_INSTALLABLE, LIST_MANUAL], + [LIST_INSTALLABLE, LIST_NOT_MANUAL], + [LIST_INSTALLABLE, LIST_REMOVABLE], + [LIST_INSTALLABLE, LIST_UPGRADABLE], + [SUMMARY, VERBOSE], + [QUIET, VERBOSE], ] + def opts_cb_list(op, api_inst, opts, opts_new): + if opts_new[LIST_ALL_REMOVABLE]: + opts_new[LIST_REMOVABLE] = True - if opts_new[LIST_ALL_REMOVABLE]: - opts_new[LIST_REMOVABLE] = True + for a, b in list_incompat_options: + if opts_new[a] and opts_new[b]: + raise InvalidOptionError(InvalidOptionError.INCOMPAT, [a, b]) - for (a, b) in list_incompat_options: - if opts_new[a] and opts_new[b]: - raise InvalidOptionError(InvalidOptionError.INCOMPAT, - [a, b]) + if opts_new[ORIGINS] and not opts_new[LIST_NEWEST]: + # Use of -g implies -a unless -n is provided. + opts_new[LIST_INSTALLED_NEWEST] = True - if opts_new[ORIGINS] and not opts_new[LIST_NEWEST]: - # Use of -g implies -a unless -n is provided. - opts_new[LIST_INSTALLED_NEWEST] = True + if opts_new[LIST_ALL] and not opts_new[LIST_INSTALLED_NEWEST]: + raise InvalidOptionError( + InvalidOptionError.REQUIRED, [LIST_ALL, LIST_INSTALLED_NEWEST] + ) - if opts_new[LIST_ALL] and not opts_new[LIST_INSTALLED_NEWEST]: - raise InvalidOptionError(InvalidOptionError.REQUIRED, - [LIST_ALL, LIST_INSTALLED_NEWEST]) + if opts_new[LIST_INSTALLABLE] and not opts_new[LIST_INSTALLED_NEWEST]: + # Use of -i implies -n unless -a is provided. 
+ opts_new[LIST_NEWEST] = True - if opts_new[LIST_INSTALLABLE] and not opts_new[LIST_INSTALLED_NEWEST]: - # Use of -i implies -n unless -a is provided. - opts_new[LIST_NEWEST] = True def opts_cb_int(k, api_inst, opts, opts_new, minimum=None): + if k not in opts or opts[k] == None: + err = _("missing required parameter") + raise InvalidOptionError(msg=err, options=[k]) - if k not in opts or opts[k] == None: - err = _("missing required parameter") - raise InvalidOptionError(msg=err, options=[k]) + # get the original argument value + v = opts[k] - # get the original argument value - v = opts[k] + # make sure it is an integer + try: + v = int(v) + except (ValueError, TypeError): + # not a valid integer + err = _("value '{0}' invalid").format(v) + raise InvalidOptionError(msg=err, options=[k]) - # make sure it is an integer - try: - v = int(v) - except (ValueError, TypeError): - # not a valid integer - err = _("value '{0}' invalid").format(v) - raise InvalidOptionError(msg=err, options=[k]) + # check the minimum bounds + if minimum is not None and v < minimum: + err = _("value must be >= {0:d}").format(minimum) + raise InvalidOptionError(msg=err, options=[k]) - # check the minimum bounds - if minimum is not None and v < minimum: - err = _("value must be >= {0:d}").format(minimum) - raise InvalidOptionError(msg=err, options=[k]) + # update the new options array to make the value an integer + opts_new[k] = v - # update the new options array to make the value an integer - opts_new[k] = v def opts_cb_fd(k, api_inst, opts, opts_new): - opts_cb_int(k, api_inst, opts, opts_new, minimum=0) + opts_cb_int(k, api_inst, opts, opts_new, minimum=0) + + err = _("value '{0}' invalid").format(opts_new[k]) + try: + os.fstat(opts_new[k]) + except OSError: + # not a valid file descriptor + raise InvalidOptionError(msg=err, options=[k]) - err = _("value '{0}' invalid").format(opts_new[k]) - try: - os.fstat(opts_new[k]) - except OSError: - # not a valid file descriptor - raise InvalidOptionError(msg=err, options=[k]) def opts_table_cb_concurrency(op, api_inst, opts, opts_new): - if opts[CONCURRENCY] is None: - # If the concurrency has been set by an environment variable - # then client_concurrency_set will be true. Don't override with - # the image default in this case. - if not global_settings.client_concurrency_set: - opts[CONCURRENCY] = api_inst.img.get_property( - DEFAULT_CONCURRENCY) - else: - # remove concurrency from parameters dict - del opts_new[CONCURRENCY] - return + if opts[CONCURRENCY] is None: + # If the concurrency has been set by an environment variable + # then client_concurrency_set will be true. Don't override with + # the image default in this case. 
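
opts_cb_int above is the shared integer validator: opts_cb_fd reuses it with a minimum of 0 before probing the descriptor with os.fstat, and the concurrency callback reuses it after falling back to the image default. A simplified version of the conversion and bounds check (a plain ValueError stands in for InvalidOptionError):

    def validate_int(opts, opts_new, key, minimum=None):
        """Convert opts[key] to an int, enforce an optional lower bound and
        store the converted value in opts_new[key]."""
        if key not in opts or opts[key] is None:
            raise ValueError(f"missing required parameter: {key}")
        try:
            value = int(opts[key])
        except (ValueError, TypeError):
            raise ValueError(f"value '{opts[key]}' invalid")
        if minimum is not None and value < minimum:
            raise ValueError(f"value must be >= {minimum}")
        opts_new[key] = value

    new_opts = {}
    validate_int({"concurrency": "4"}, new_opts, "concurrency", minimum=1)
    print(new_opts)  # {'concurrency': 4}
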
+ if not global_settings.client_concurrency_set: + opts[CONCURRENCY] = api_inst.img.get_property(DEFAULT_CONCURRENCY) + else: + # remove concurrency from parameters dict + del opts_new[CONCURRENCY] + return - # make sure we have an integer - opts_cb_int(CONCURRENCY, api_inst, opts, opts_new) + # make sure we have an integer + opts_cb_int(CONCURRENCY, api_inst, opts, opts_new) + + # update global concurrency setting + global_settings.client_concurrency = opts_new[CONCURRENCY] + global_settings.client_concurrency_set = True - # update global concurrency setting - global_settings.client_concurrency = opts_new[CONCURRENCY] - global_settings.client_concurrency_set = True + # remove concurrency from parameters dict + del opts_new[CONCURRENCY] - # remove concurrency from parameters dict - del opts_new[CONCURRENCY] def opts_table_cb_actuators(op, api_inst, opts, opts_new): + del opts_new[ACT_TIMEOUT] + del opts_new[SYNC_ACT] - del opts_new[ACT_TIMEOUT] - del opts_new[SYNC_ACT] + if opts[ACT_TIMEOUT]: + # make sure we have an integer + opts_cb_int(ACT_TIMEOUT, api_inst, opts, opts_new) + elif opts[SYNC_ACT]: + # -1 is no timeout + opts_new[ACT_TIMEOUT] = -1 + else: + # 0 is no sync actuators are used (timeout=0) + opts_new[ACT_TIMEOUT] = 0 - if opts[ACT_TIMEOUT]: - # make sure we have an integer - opts_cb_int(ACT_TIMEOUT, api_inst, opts, opts_new) - elif opts[SYNC_ACT]: - # -1 is no timeout - opts_new[ACT_TIMEOUT] = -1 - else: - # 0 is no sync actuators are used (timeout=0) - opts_new[ACT_TIMEOUT] = 0 # # options common to multiple pkg(1) operations. The format for specifying @@ -921,6 +1028,8 @@ def opts_table_cb_actuators(op, api_inst, opts, opts_new): # {}: json schema. # +# fmt: off + opts_table_info = [ opts_table_cb_info, (DISPLAY_LICENSE, False, [], {"type": "boolean"}), @@ -1378,7 +1487,6 @@ def opts_table_cb_actuators(op, api_inst, opts, opts_new): [] pkg_op_opts = { - pkgdefs.PKG_OP_ATTACH : opts_attach_linked, pkgdefs.PKG_OP_AUDIT_LINKED : opts_audit_linked, pkgdefs.PKG_OP_CHANGE_FACET : opts_install, @@ -1409,130 +1517,133 @@ def opts_table_cb_actuators(op, api_inst, opts, opts_new): pkgdefs.PKG_OP_VERIFY : opts_verify } -def get_pkg_opts(op, add_table=None): - """Get the available options for a particular operation specified by - 'op'. If the client uses custom pkg_op_opts tables they can be specified - by 'add_table'.""" +# fmt: on - popts = pkg_op_opts.copy() - if add_table is not None: - popts.update(add_table) - try: - opts = popts[op] - except KeyError: - opts = None - return opts - -def get_pkg_opts_defaults(op, opt, add_table=None): - """ Get the default value for a certain option 'opt' of a certain - operation 'op'. This is useful for clients which toggle boolean options. - """ - popts = get_pkg_opts(op, add_table) - - for o in popts: - if type(o) != tuple: - continue - if len(o) == 2: - opt_name, default = o - elif len(o) == 3: - opt_name, default, dummy_valid_args = o - elif len(o) == 4: - opt_name, default, dummy_valid_args, dummy_schema = o - if opt_name == opt: - return default - -def opts_assemble(op, api_inst, opts, add_table=None, cwd=None): - """Assembly of the options for a specific operation. Options are read in - from a dict (see explanation below) and sanity tested. +def get_pkg_opts(op, add_table=None): + """Get the available options for a particular operation specified by + 'op'. If the client uses custom pkg_op_opts tables they can be specified + by 'add_table'.""" - This is the common interface to supply options to the functions of the - API. 
+ popts = pkg_op_opts.copy() + if add_table is not None: + popts.update(add_table) - 'op' is the operation for which the options need to be assembled and - verified. The currently supported operations are listed in - pkgdefs.pkg_op_values. + try: + opts = popts[op] + except KeyError: + opts = None + return opts - 'api_inst' is a reference to the API instance, required for some of the - verification steps. - 'opts' is the raw options table to be processed. It needs to be a dict - in the format: { option_name: argument, ... } - """ +def get_pkg_opts_defaults(op, opt, add_table=None): + """Get the default value for a certain option 'opt' of a certain + operation 'op'. This is useful for clients which toggle boolean options. + """ + popts = get_pkg_opts(op, add_table) + + for o in popts: + if type(o) != tuple: + continue + if len(o) == 2: + opt_name, default = o + elif len(o) == 3: + opt_name, default, dummy_valid_args = o + elif len(o) == 4: + opt_name, default, dummy_valid_args, dummy_schema = o + if opt_name == opt: + return default - global _orig_cwd - if cwd is not None: - _orig_cwd = cwd - else: - _orig_cwd = None - - popts = get_pkg_opts(op, add_table) - - rv = {} - callbacks = [] - - for o in popts: - if type(o) != tuple: - callbacks.append(o) - continue - valid_args = [] - # If no valid argument list specified. - if len(o) == 2: - avail_opt, default = o - elif len(o) == 3: - avail_opt, default, valid_args = o - elif len(o) == 4: - avail_opt, default, valid_args, schema = o - # for options not given we substitue the default value - if avail_opt not in opts: - rv[avail_opt] = default - continue - - if type(default) == int: - assert type(opts[avail_opt]) == int, opts[avail_opt] - elif type(default) == list: - assert type(opts[avail_opt]) == list, opts[avail_opt] - elif type(default) == bool: - assert type(opts[avail_opt]) == bool, opts[avail_opt] - - if valid_args: - assert type(default) == list or default is None, \ - default - raise_error = False - if type(opts[avail_opt]) == list: - if not set(opts[avail_opt]).issubset( - set(valid_args)): - raise_error = True - else: - # If the any of valid_args is integer, we first - # try to convert the argument value into - # integer. This is for CLI mode where arguments - # are strings. - if any(type(va) == int for va in valid_args): - try: - opts[avail_opt] = int( - opts[avail_opt]) - except Exception: - pass - if opts[avail_opt] not in valid_args: - raise_error = True - if raise_error: - raise InvalidOptionError( - InvalidOptionError.ARG_INVALID, - [opts[avail_opt], avail_opt], - valid_args=valid_args) - - rv[avail_opt] = opts[avail_opt] - - rv_updated = rv.copy() - - # run the option verification callbacks - for cb in callbacks: - cb(op, api_inst, rv, rv_updated) - - return rv_updated +def opts_assemble(op, api_inst, opts, add_table=None, cwd=None): + """Assembly of the options for a specific operation. Options are read in + from a dict (see explanation below) and sanity tested. + + This is the common interface to supply options to the functions of the + API. + + 'op' is the operation for which the options need to be assembled and + verified. The currently supported operations are listed in + pkgdefs.pkg_op_values. + + 'api_inst' is a reference to the API instance, required for some of the + verification steps. + + 'opts' is the raw options table to be processed. It needs to be a dict + in the format: { option_name: argument, ... 
} + """ + + global _orig_cwd + + if cwd is not None: + _orig_cwd = cwd + else: + _orig_cwd = None + + popts = get_pkg_opts(op, add_table) + + rv = {} + callbacks = [] + + for o in popts: + if type(o) != tuple: + callbacks.append(o) + continue + valid_args = [] + # If no valid argument list specified. + if len(o) == 2: + avail_opt, default = o + elif len(o) == 3: + avail_opt, default, valid_args = o + elif len(o) == 4: + avail_opt, default, valid_args, schema = o + # for options not given we substitue the default value + if avail_opt not in opts: + rv[avail_opt] = default + continue + + if type(default) == int: + assert type(opts[avail_opt]) == int, opts[avail_opt] + elif type(default) == list: + assert type(opts[avail_opt]) == list, opts[avail_opt] + elif type(default) == bool: + assert type(opts[avail_opt]) == bool, opts[avail_opt] + + if valid_args: + assert type(default) == list or default is None, default + raise_error = False + if type(opts[avail_opt]) == list: + if not set(opts[avail_opt]).issubset(set(valid_args)): + raise_error = True + else: + # If the any of valid_args is integer, we first + # try to convert the argument value into + # integer. This is for CLI mode where arguments + # are strings. + if any(type(va) == int for va in valid_args): + try: + opts[avail_opt] = int(opts[avail_opt]) + except Exception: + pass + if opts[avail_opt] not in valid_args: + raise_error = True + if raise_error: + raise InvalidOptionError( + InvalidOptionError.ARG_INVALID, + [opts[avail_opt], avail_opt], + valid_args=valid_args, + ) + + rv[avail_opt] = opts[avail_opt] + + rv_updated = rv.copy() + + # run the option verification callbacks + for cb in callbacks: + cb(op, api_inst, rv, rv_updated) + + return rv_updated # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/pkg_solver.py b/src/modules/client/pkg_solver.py index 4caf8274b..c7b9865b4 100644 --- a/src/modules/client/pkg_solver.py +++ b/src/modules/client/pkg_solver.py @@ -32,10 +32,12 @@ import time from collections import defaultdict + # Redefining built-in; pylint: disable=W0622 from functools import reduce import six + # Imports from package six are not grouped: pylint: disable=C0412 from six.moves import range from itertools import chain @@ -55,9 +57,9 @@ from pkg.client.pkgdefs import PKG_OP_UNINSTALL, PKG_OP_UPDATE from pkg.misc import EmptyI, EmptyDict, N_ -SOLVER_INIT = "Initialized" -SOLVER_OXY = "Not possible" -SOLVER_FAIL = "Failed" +SOLVER_INIT = "Initialized" +SOLVER_OXY = "Not possible" +SOLVER_FAIL = "Failed" SOLVER_SUCCESS = "Succeeded" # @@ -66,3462 +68,3765 @@ # instead, they indicate the 'type' of trim applied. Values below must be # unique, but can be changed at any time. 
# -_TRIM_DEP_MISSING = 0 # no matching pkg version found for dep -_TRIM_DEP_OBSOLETE = 1 # all versions allowed by dep are obsolete -_TRIM_DEP_TRIMMED = 2 # all versions allowed by dep already trimmed -_TRIM_FIRMWARE = 3 # firmware version requirement -_TRIM_FREEZE = 4 # pkg not allowed by freeze -_TRIM_INSTALLED_EXCLUDE = 5 # pkg excludes installed pkg -_TRIM_INSTALLED_INC = 6 # not allowed by installed pkg incorporation -_TRIM_INSTALLED_NEWER = 7 # newer version installed already -_TRIM_INSTALLED_ORIGIN = 8 # installed version in image too old -_TRIM_INSTALLED_ROOT_ORIGIN = 9 # installed version in root image too old -_TRIM_PARENT_MISSING = 10 # parent image must have this pkg too -_TRIM_PARENT_NEWER = 11 # parent image has newer version -_TRIM_PARENT_OLDER = 12 # parent image has older version -_TRIM_PARENT_PUB = 13 # parent image has different publisher -_TRIM_PROPOSED_INC = 14 # not allowed by requested pkg incorporation -_TRIM_PROPOSED_PUB = 15 # didn't match requested publisher -_TRIM_PROPOSED_VER = 16 # didn't match requested version -_TRIM_PUB_RANK = 17 # pkg from higher or lower ranked publisher -_TRIM_PUB_STICKY = 18 # pkg publisher != installed pkg publisher -_TRIM_REJECT = 19 # --reject -_TRIM_UNSUPPORTED = 20 # invalid or unsupported actions -_TRIM_VARIANT = 21 # unsupported variant (e.g. i386 on sparc) -_TRIM_EXPLICIT_INSTALL = 22 # pkg.depend.explicit-install is true. -_TRIM_SYNCED_INC = 23 # incorporation must be in sync with parent -_TRIM_MAX = 24 # number of trim constants +_TRIM_DEP_MISSING = 0 # no matching pkg version found for dep +_TRIM_DEP_OBSOLETE = 1 # all versions allowed by dep are obsolete +_TRIM_DEP_TRIMMED = 2 # all versions allowed by dep already trimmed +_TRIM_FIRMWARE = 3 # firmware version requirement +_TRIM_FREEZE = 4 # pkg not allowed by freeze +_TRIM_INSTALLED_EXCLUDE = 5 # pkg excludes installed pkg +_TRIM_INSTALLED_INC = 6 # not allowed by installed pkg incorporation +_TRIM_INSTALLED_NEWER = 7 # newer version installed already +_TRIM_INSTALLED_ORIGIN = 8 # installed version in image too old +_TRIM_INSTALLED_ROOT_ORIGIN = 9 # installed version in root image too old +_TRIM_PARENT_MISSING = 10 # parent image must have this pkg too +_TRIM_PARENT_NEWER = 11 # parent image has newer version +_TRIM_PARENT_OLDER = 12 # parent image has older version +_TRIM_PARENT_PUB = 13 # parent image has different publisher +_TRIM_PROPOSED_INC = 14 # not allowed by requested pkg incorporation +_TRIM_PROPOSED_PUB = 15 # didn't match requested publisher +_TRIM_PROPOSED_VER = 16 # didn't match requested version +_TRIM_PUB_RANK = 17 # pkg from higher or lower ranked publisher +_TRIM_PUB_STICKY = 18 # pkg publisher != installed pkg publisher +_TRIM_REJECT = 19 # --reject +_TRIM_UNSUPPORTED = 20 # invalid or unsupported actions +_TRIM_VARIANT = 21 # unsupported variant (e.g. i386 on sparc) +_TRIM_EXPLICIT_INSTALL = 22 # pkg.depend.explicit-install is true. 
+_TRIM_SYNCED_INC = 23 # incorporation must be in sync with parent +_TRIM_MAX = 24 # number of trim constants class DependencyException(Exception): - """local exception used to pass failure to match - dependencies in packages out of nested evaluation""" + """local exception used to pass failure to match + dependencies in packages out of nested evaluation""" - def __init__(self, reason_id, reason, fmris=EmptyI): - Exception.__init__(self) - self.__fmris = fmris - self.__reason_id = reason_id - self.__reason = reason + def __init__(self, reason_id, reason, fmris=EmptyI): + Exception.__init__(self) + self.__fmris = fmris + self.__reason_id = reason_id + self.__reason = reason - @property - def fmris(self): - """The FMRIs related to the exception.""" - return self.__fmris + @property + def fmris(self): + """The FMRIs related to the exception.""" + return self.__fmris - @property - def reason_id(self): - """A constant indicating why the related FMRIs were rejected.""" - return self.__reason_id + @property + def reason_id(self): + """A constant indicating why the related FMRIs were rejected.""" + return self.__reason_id - @property - def reason(self): - """A string describing why the related FMRIs were rejected.""" - return self.__reason + @property + def reason(self): + """A string describing why the related FMRIs were rejected.""" + return self.__reason class PkgSolver(object): - """Provides a SAT-based solution solver to determine which packages - should be installed, updated, or removed to perform a requested - operation.""" - - def __init__(self, cat, installed_dict, pub_ranks, variants, avoids, - parent_pkgs, progtrack): - """Create a PkgSolver instance; catalog should contain all - known pkgs, installed fmris should be a dict of fmris indexed - by name that define pkgs current installed in the image. - Pub_ranks dict contains (rank, stickiness, enabled) for each - publisher. variants are the current image variants; avoids is - the set of pkg stems being avoided in the image due to - administrator action (e.g. --reject, uninstall).""" - - # Value 'DebugValues' is unsubscriptable; - # pylint: disable=E1136 - # check if we're allowed to use the solver - if DebugValues["no_solver"]: - raise RuntimeError("no_solver set, but solver invoked") - - self.__catalog = cat - self.__known_incs = set() # stems with incorporate deps - self.__publisher = {} # indexed by stem - self.__possible_dict = defaultdict(list) # indexed by stem - self.__pub_ranks = pub_ranks # rank indexed by pub - self.__depend_ts = False # flag used to indicate whether - # any dependencies with - # timestamps were seen; used in - # error output generation - self.__trim_dict = defaultdict(set) # fmris trimmed from - # consideration - - self.__installed_dict = installed_dict.copy() # indexed by stem - self.__installed_pkgs = frozenset(self.__installed_dict) - self.__installed_fmris = frozenset( - self.__installed_dict.values()) - - self.__pub_trim = {} # pkg names already - # trimmed by pub. - self.__removal_fmris = set() # installed fmris we're - # going to remove - - self.__req_pkg_names = set() # package names that must be - # present in solution by spec. 
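
The _TRIM_* values above are not error codes: each labels why a candidate version was dropped from consideration, and the solver keeps the label plus a human-readable reason in a per-FMRI set (the __trim_dict defaultdict created in __init__) so that a failed solution can be explained afterwards. A toy sketch of that bookkeeping, with plain strings standing in for FMRI objects:

    from collections import defaultdict

    TRIM_FREEZE = 4    # pkg not allowed by freeze (mirrors _TRIM_FREEZE)
    TRIM_REJECT = 19   # --reject                  (mirrors _TRIM_REJECT)

    trim_dict = defaultdict(set)   # candidate fmri -> {(reason_id, reason_text)}

    def trim(fmris, reason_id, reason):
        """Record why each candidate was removed from consideration."""
        for f in fmris:
            trim_dict[f].add((reason_id, reason))

    trim(["pkg://example/web/server@2.0"], TRIM_REJECT,
         "This version rejected by user request")
    print(dict(trim_dict))
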
- for f in self.__installed_fmris: # record only sticky pubs - pub = f.publisher - if self.__pub_ranks[pub][1]: - self.__publisher[f.pkg_name] = pub - - self.__id2fmri = {} # map ids -> fmris - self.__fmri2id = {} # and reverse - - self.__solver = pkg.solver.msat_solver() - - self.__progtrack = progtrack # progress tracker - self.__progitem = None # progress tracker plan item - - self.__addclause_failure = False - - self.__variant_dict = {} # fmris -> variant cache - self.__variants = variants # variants supported by image - - self.__cache = {} - self.__actcache = {} - self.__trimdone = False # indicate we're finished - # trimming - self.__fmri_state = {} # cache of obsolete, renamed - # bits so we can print something - # reasonable - self.__state = SOLVER_INIT - self.__iterations = 0 - self.__clauses = 0 - self.__variables = 0 - self.__subphasename = None - self.__timings = [] - self.__start_time = 0 - self.__inc_list = [] - self.__dependents = None - # set of fmris installed in root image; used for origin - # dependencies - self.__root_fmris = None - # set of stems avoided by admin (e.g. --reject, uninstall) - self.__avoid_set = avoids.copy() - # set of stems avoided by solver due to dependency constraints - # (e.g. all fmris that satisfy group dependency trimmed); this - # intentionally starts empty for every new solver invocation and - # is only stored in image configuration for diagnostic purposes. - self.__implicit_avoid_set = set() - # set of obsolete stems - self.__obs_set = None - # set of stems we're rejecting - self.__reject_set = set() - # pkgs that have parent deps - self.__linked_pkgs = set() - - # Internal cache of created fmri objects. Used so that the same - # PkgFmri doesn't need to be created more than once. This isn't - # a weakref dictionary because in two of the four places where - # PkgFmri's are created, the name is extracted and the PkgFmri - # object is immediately discarded. - self.__fmridict = {} - - # Packages with explicit install action set to true. - self.__expl_install_dict = {} - - assert isinstance(parent_pkgs, (type(None), frozenset)) - self.__parent_pkgs = parent_pkgs - self.__parent_dict = dict() - if self.__parent_pkgs != None: - self.__parent_dict = dict([ - (f.pkg_name, f) - for f in self.__parent_pkgs - ]) - - # cache of firmware dependencies - self.__firmware = Firmware() - - self.__triggered_ops = { - PKG_OP_UNINSTALL : { - PKG_OP_UPDATE : set(), - PKG_OP_UNINSTALL : set(), - }, - } - - self.__allowed_downgrades = set() # allowed downrev FMRIs - self.__dg_incorp_cache = {} # cache for downgradable - # incorp deps - - def __str__(self): - s = "Solver: [" - if self.__state in [SOLVER_FAIL, SOLVER_SUCCESS]: - s += (" Variables: {0:d} Clauses: {1:d} Iterations: " - "{2:d}").format(self.__variables, self.__clauses, - self.__iterations) - s += " State: {0}]".format(self.__state) - - s += "\nTimings: [" - s += ", ".join([ - "{0}: {1: 6.3f}".format(*a) - for a in self.__timings - ]) - s += "]" - - if self.__inc_list: - incs = "\n\t".join([str(a) for a in self.__inc_list]) - else: - incs = "None" - - s += "\nMaintained incorporations: {0}\n".format(incs) - - return s - - def __cleanup(self, rval): - """Discards all solver information except for that needed to - show failure information or to stringify the solver object. 
- This allows early garbage collection to take place, and should - be performed after a solution is successfully returned.""" - - self.__catalog = None - self.__installed_dict = {} - self.__installed_pkgs = frozenset() - self.__installed_fmris = frozenset() - self.__publisher = {} - self.__possible_dict = {} - self.__pub_ranks = None - self.__pub_trim = {} - self.__removal_fmris = set() - self.__id2fmri = None - self.__fmri2id = None - self.__solver = None - self.__progtrack = None - self.__addclause_failure = False - self.__variant_dict = None - self.__variants = None - self.__cache = None - self.__actcache = None - self.__trimdone = None - self.__fmri_state = None - self.__start_time = None - self.__dependents = None - self.__fmridict = {} - self.__firmware = None - self.__allowed_downgrades = None - self.__dg_incorp_cache = None - self.__linked_pkgs = set() - - # Value 'DebugValues' is unsubscriptable; - # pylint: disable=E1136 - if DebugValues["plan"]: - # Remaining data must be kept. - return rval - - self.__trim_dict = None - return rval - - def __progress(self): - """Bump progress tracker to indicate processing is active.""" - assert self.__progitem - self.__progtrack.plan_add_progress(self.__progitem) - - def __start_subphase(self, subphase=None, reset=False): - """Add timing records and tickle progress tracker. Ends - previous subphase if ongoing.""" - if reset: - self.__timings = [] - if self.__subphasename is not None: - self.__end_subphase() - self.__start_time = time.time() - self.__subphasename = "phase {0:d}".format(subphase) - self.__progress() + """Provides a SAT-based solution solver to determine which packages + should be installed, updated, or removed to perform a requested + operation.""" + + def __init__( + self, + cat, + installed_dict, + pub_ranks, + variants, + avoids, + parent_pkgs, + progtrack, + ): + """Create a PkgSolver instance; catalog should contain all + known pkgs, installed fmris should be a dict of fmris indexed + by name that define pkgs current installed in the image. + Pub_ranks dict contains (rank, stickiness, enabled) for each + publisher. variants are the current image variants; avoids is + the set of pkg stems being avoided in the image due to + administrator action (e.g. --reject, uninstall).""" + + # Value 'DebugValues' is unsubscriptable; + # pylint: disable=E1136 + # check if we're allowed to use the solver + if DebugValues["no_solver"]: + raise RuntimeError("no_solver set, but solver invoked") + + self.__catalog = cat + self.__known_incs = set() # stems with incorporate deps + self.__publisher = {} # indexed by stem + self.__possible_dict = defaultdict(list) # indexed by stem + self.__pub_ranks = pub_ranks # rank indexed by pub + self.__depend_ts = False # flag used to indicate whether + # any dependencies with + # timestamps were seen; used in + # error output generation + self.__trim_dict = defaultdict(set) # fmris trimmed from + # consideration + + self.__installed_dict = installed_dict.copy() # indexed by stem + self.__installed_pkgs = frozenset(self.__installed_dict) + self.__installed_fmris = frozenset(self.__installed_dict.values()) + + self.__pub_trim = {} # pkg names already + # trimmed by pub. + self.__removal_fmris = set() # installed fmris we're + # going to remove + + self.__req_pkg_names = set() # package names that must be + # present in solution by spec. 
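
The loop that follows records, for each installed package, which publisher it came from, but only when that publisher is sticky (the second field of the (rank, stickiness, enabled) tuples in pub_ranks); non-sticky publishers do not pin future versions to themselves. Schematically, with made-up data:

    pub_ranks = {                    # publisher -> (rank, sticky, enabled)
        "omnios": (1, True, True),
        "extra.omnios": (2, False, True),
    }
    installed = {                    # pkg stem -> publisher it was installed from
        "web/server": "omnios",
        "ooce/nginx": "extra.omnios",
    }

    # Only sticky publishers constrain where newer versions may come from.
    pinned = {stem: pub for stem, pub in installed.items() if pub_ranks[pub][1]}
    print(pinned)   # {'web/server': 'omnios'}
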
+ for f in self.__installed_fmris: # record only sticky pubs + pub = f.publisher + if self.__pub_ranks[pub][1]: + self.__publisher[f.pkg_name] = pub + + self.__id2fmri = {} # map ids -> fmris + self.__fmri2id = {} # and reverse + + self.__solver = pkg.solver.msat_solver() + + self.__progtrack = progtrack # progress tracker + self.__progitem = None # progress tracker plan item + + self.__addclause_failure = False + + self.__variant_dict = {} # fmris -> variant cache + self.__variants = variants # variants supported by image + + self.__cache = {} + self.__actcache = {} + self.__trimdone = False # indicate we're finished + # trimming + self.__fmri_state = {} # cache of obsolete, renamed + # bits so we can print something + # reasonable + self.__state = SOLVER_INIT + self.__iterations = 0 + self.__clauses = 0 + self.__variables = 0 + self.__subphasename = None + self.__timings = [] + self.__start_time = 0 + self.__inc_list = [] + self.__dependents = None + # set of fmris installed in root image; used for origin + # dependencies + self.__root_fmris = None + # set of stems avoided by admin (e.g. --reject, uninstall) + self.__avoid_set = avoids.copy() + # set of stems avoided by solver due to dependency constraints + # (e.g. all fmris that satisfy group dependency trimmed); this + # intentionally starts empty for every new solver invocation and + # is only stored in image configuration for diagnostic purposes. + self.__implicit_avoid_set = set() + # set of obsolete stems + self.__obs_set = None + # set of stems we're rejecting + self.__reject_set = set() + # pkgs that have parent deps + self.__linked_pkgs = set() + + # Internal cache of created fmri objects. Used so that the same + # PkgFmri doesn't need to be created more than once. This isn't + # a weakref dictionary because in two of the four places where + # PkgFmri's are created, the name is extracted and the PkgFmri + # object is immediately discarded. + self.__fmridict = {} + + # Packages with explicit install action set to true. + self.__expl_install_dict = {} + + assert isinstance(parent_pkgs, (type(None), frozenset)) + self.__parent_pkgs = parent_pkgs + self.__parent_dict = dict() + if self.__parent_pkgs != None: + self.__parent_dict = dict( + [(f.pkg_name, f) for f in self.__parent_pkgs] + ) + + # cache of firmware dependencies + self.__firmware = Firmware() + + self.__triggered_ops = { + PKG_OP_UNINSTALL: { + PKG_OP_UPDATE: set(), + PKG_OP_UNINSTALL: set(), + }, + } + + self.__allowed_downgrades = set() # allowed downrev FMRIs + self.__dg_incorp_cache = {} # cache for downgradable + # incorp deps + + def __str__(self): + s = "Solver: [" + if self.__state in [SOLVER_FAIL, SOLVER_SUCCESS]: + s += ( + " Variables: {0:d} Clauses: {1:d} Iterations: " "{2:d}" + ).format(self.__variables, self.__clauses, self.__iterations) + s += " State: {0}]".format(self.__state) + + s += "\nTimings: [" + s += ", ".join(["{0}: {1: 6.3f}".format(*a) for a in self.__timings]) + s += "]" + + if self.__inc_list: + incs = "\n\t".join([str(a) for a in self.__inc_list]) + else: + incs = "None" + + s += "\nMaintained incorporations: {0}\n".format(incs) + + return s + + def __cleanup(self, rval): + """Discards all solver information except for that needed to + show failure information or to stringify the solver object. 
+ This allows early garbage collection to take place, and should + be performed after a solution is successfully returned.""" + + self.__catalog = None + self.__installed_dict = {} + self.__installed_pkgs = frozenset() + self.__installed_fmris = frozenset() + self.__publisher = {} + self.__possible_dict = {} + self.__pub_ranks = None + self.__pub_trim = {} + self.__removal_fmris = set() + self.__id2fmri = None + self.__fmri2id = None + self.__solver = None + self.__progtrack = None + self.__addclause_failure = False + self.__variant_dict = None + self.__variants = None + self.__cache = None + self.__actcache = None + self.__trimdone = None + self.__fmri_state = None + self.__start_time = None + self.__dependents = None + self.__fmridict = {} + self.__firmware = None + self.__allowed_downgrades = None + self.__dg_incorp_cache = None + self.__linked_pkgs = set() + + # Value 'DebugValues' is unsubscriptable; + # pylint: disable=E1136 + if DebugValues["plan"]: + # Remaining data must be kept. + return rval + + self.__trim_dict = None + return rval + + def __progress(self): + """Bump progress tracker to indicate processing is active.""" + assert self.__progitem + self.__progtrack.plan_add_progress(self.__progitem) + + def __start_subphase(self, subphase=None, reset=False): + """Add timing records and tickle progress tracker. Ends + previous subphase if ongoing.""" + if reset: + self.__timings = [] + if self.__subphasename is not None: + self.__end_subphase() + self.__start_time = time.time() + self.__subphasename = "phase {0:d}".format(subphase) + self.__progress() + + def __end_subphase(self): + """Mark the end of a solver subphase, recording time taken.""" + now = time.time() + self.__timings.append((self.__subphasename, now - self.__start_time)) + self.__start_time = None + self.__subphasename = None + + def __trim_frozen(self, existing_freezes): + """Trim any packages we cannot update due to freezes.""" + for f, r, _t in existing_freezes: + if r: + reason = ( + N_( + "This version is excluded by a " + "freeze on {0} at version {1}. The " + "reason for the freeze is: {2}" + ), + (f.pkg_name, f.version.get_version(include_build=False), r), + ) + else: + reason = ( + N_( + "This version is excluded by a " + "freeze on {0} at version {1}." + ), + (f.pkg_name, f.version.get_version(include_build=False)), + ) + self.__trim( + self.__comb_auto_fmris(f, dotrim=False)[1], _TRIM_FREEZE, reason + ) + + def __raise_solution_error(self, no_version=EmptyI, no_solution=EmptyI): + """Raise a plan exception due to solution errors.""" + + solver_errors = None + # Value 'DebugValues' is unsubscriptable; + # pylint: disable=E1136 + if DebugValues["plan"]: + solver_errors = self.get_trim_errors() + raise api_errors.PlanCreationException( + no_solution=no_solution, + no_version=no_version, + solver_errors=solver_errors, + ) + + def __trim_proposed(self, proposed_dict): + """Remove any versions from proposed_dict that are in trim_dict + and raise an exception if no matching version of a proposed + package can be installed at this point.""" + + if proposed_dict is None: + # Nothing to do. + return + + # Used to de-dup errors. 
+ already_seen = set() + + ret = [] + for name in proposed_dict: + tv = self.__dotrim(proposed_dict[name]) + if tv: + proposed_dict[name] = tv + continue + + ret.extend( + [ + _("No matching version of {0} can be " "installed:").format( + name + ) + ] + ) + ret.extend( + self.__fmri_list_errors( + proposed_dict[name], already_seen=already_seen + ) + ) + # continue processing and accumulate all errors + if ret: + self.__raise_solution_error(no_version=ret) + + def __set_removed_and_required_packages(self, rejected, proposed=None): + """Sets the list of package to be removed from the image, the + list of packages to reject, the list of packages to avoid + during the operation, and the list of packages that must not be + removed from the image. + + 'rejected' is a set of package stems to reject. + + 'proposed' is an optional set of FMRI objects representing + packages to install or update. + + Upon return: + * self.__removal_fmris will contain the list of FMRIs to be + removed from the image due to user request or due to past + bugs that caused wrong variant to be installed by mistake. + + * self.__reject_set will contain the list of packages to avoid + or that were rejected by user request as appropriate.""" + + if proposed is None: + proposed = set() + else: + # remove packages to be installed from avoid sets + self.__avoid_set -= proposed + self.__implicit_avoid_set -= proposed + + self.__removal_fmris |= set( + [ + self.__installed_dict[name] + for name in rejected + if name in self.__installed_dict + ] + + [ + f + for f in self.__installed_fmris + if not self.__trim_nonmatching_variants(f) + ] + ) + + self.__reject_set = rejected + + # trim fmris that user explicitly disallowed + for name in rejected: + self.__trim( + self.__get_catalog_fmris(name), + _TRIM_REJECT, + N_("This version rejected by user request"), + ) + + self.__req_pkg_names = (self.__installed_pkgs | proposed) - rejected + self.__req_pkg_names -= set(f.pkg_name for f in self.__removal_fmris) + + def __set_proposed_required(self, proposed_dict, excludes): + """Add the common set of conditional, group, and require + dependencies of proposed packages to the list of package stems + known to be a required part of the solution. This will improve + error messaging if no solution is found.""" + + if proposed_dict is None: + return + + req_dep_names = set() + for name in proposed_dict: + # Find intersection of the set of conditional, group, + # and require dependencies for all proposed versions of + # the proposed package. The result is the set of + # package stems we know will be required to be part of + # the solution. 
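            # For example (hypothetical stems): if foo@1 requires
            # {"library/zlib", "runtime/python"} and foo@2 requires only
            # {"library/zlib"}, the intersection below leaves just
            # "library/zlib" as a stem known to be required regardless of
            # which candidate version ends up in the solution.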
+ comm_deps = None + propvers = set(self.__dotrim(proposed_dict[name])) + for f in propvers: + prop_deps = set( + dname + for (dtype, dname) in ( + ( + da.attrs["type"], + pkg.fmri.extract_pkg_name(da.attrs["fmri"]), + ) + for da in self.__get_dependency_actions(f, excludes) + if da.attrs["type"] == "conditional" + or da.attrs["type"] == "group" + or da.attrs["type"] == "require" + ) + if dtype != "group" + or ( + dname not in self.__avoid_set + and dname not in self.__reject_set + ) + ) - def __end_subphase(self): - """Mark the end of a solver subphase, recording time taken.""" - now = time.time() - self.__timings.append((self.__subphasename, - now - self.__start_time)) - self.__start_time = None - self.__subphasename = None - - def __trim_frozen(self, existing_freezes): - """Trim any packages we cannot update due to freezes.""" - for f, r, _t in existing_freezes: - if r: - reason = (N_("This version is excluded by a " - "freeze on {0} at version {1}. The " - "reason for the freeze is: {2}"), - (f.pkg_name, f.version.get_version( - include_build=False), r)) - else: - reason = (N_("This version is excluded by a " - "freeze on {0} at version {1}."), - (f.pkg_name, f.version.get_version( - include_build=False))) - self.__trim(self.__comb_auto_fmris(f, dotrim=False)[1], - _TRIM_FREEZE, reason) - - def __raise_solution_error(self, no_version=EmptyI, no_solution=EmptyI): - """Raise a plan exception due to solution errors.""" - - solver_errors = None - # Value 'DebugValues' is unsubscriptable; - # pylint: disable=E1136 - if DebugValues["plan"]: - solver_errors = self.get_trim_errors() - raise api_errors.PlanCreationException(no_solution=no_solution, - no_version=no_version, solver_errors=solver_errors) - - def __trim_proposed(self, proposed_dict): - """Remove any versions from proposed_dict that are in trim_dict - and raise an exception if no matching version of a proposed - package can be installed at this point.""" - - if proposed_dict is None: - # Nothing to do. - return - - # Used to de-dup errors. - already_seen = set() - - ret = [] - for name in proposed_dict: - tv = self.__dotrim(proposed_dict[name]) - if tv: - proposed_dict[name] = tv - continue - - ret.extend([_("No matching version of {0} can be " - "installed:").format(name)]) - ret.extend(self.__fmri_list_errors(proposed_dict[name], - already_seen=already_seen)) - # continue processing and accumulate all errors - if ret: - self.__raise_solution_error(no_version=ret) - - def __set_removed_and_required_packages(self, rejected, proposed=None): - """Sets the list of package to be removed from the image, the - list of packages to reject, the list of packages to avoid - during the operation, and the list of packages that must not be - removed from the image. - - 'rejected' is a set of package stems to reject. - - 'proposed' is an optional set of FMRI objects representing - packages to install or update. - - Upon return: - * self.__removal_fmris will contain the list of FMRIs to be - removed from the image due to user request or due to past - bugs that caused wrong variant to be installed by mistake. 
- - * self.__reject_set will contain the list of packages to avoid - or that were rejected by user request as appropriate.""" - - if proposed is None: - proposed = set() + if comm_deps is None: + comm_deps = prop_deps else: - # remove packages to be installed from avoid sets - self.__avoid_set -= proposed - self.__implicit_avoid_set -= proposed - - self.__removal_fmris |= set([ - self.__installed_dict[name] - for name in rejected - if name in self.__installed_dict - ] + [ - f - for f in self.__installed_fmris - if not self.__trim_nonmatching_variants(f) - ]) - - self.__reject_set = rejected - - # trim fmris that user explicitly disallowed - for name in rejected: - self.__trim(self.__get_catalog_fmris(name), - _TRIM_REJECT, - N_("This version rejected by user request")) - - self.__req_pkg_names = (self.__installed_pkgs | - proposed) - rejected - self.__req_pkg_names -= set( - f.pkg_name - for f in self.__removal_fmris + comm_deps &= prop_deps + + if comm_deps: + req_dep_names |= comm_deps + + self.__req_pkg_names = frozenset(req_dep_names | self.__req_pkg_names) + + def __update_possible_closure( + self, + possible, + excludes, + full_trim=False, + filter_explicit=True, + proposed_dict=None, + ): + """Update the provided possible set of fmris with the transitive + closure of dependencies that can be satisfied, trimming those + packages that cannot be installed. + + 'possible' is a set of FMRI objects representing all possible + versions of packages to consider for the operation. + + 'full_trim' is an optional boolean indicating whether a full + trim of the dependency graph should be performed. This is NOT + required for the solver to find a solution. Trimming is only + needed to reduce the size of clauses and to provide error + messages. This requires multiple passes to determine if the + transitive closure of dependencies can be satisfied. This is + not required for correctness (and it greatly increases runtime). + However, it does greatly improve error messaging for some error + cases. + + 'filter_explicit' is an optional boolean indicating whether + packages with pkg.depend.explicit-install set to true will be + filtered out. + + 'proposed_dict' contains user specified FMRI objects indexed by + pkg_name that should be installed or updated within an image. + + An example of a case where full_trim will be useful (dueling + incorporations): + + Installed: + entire + incorporates java-7-incorporation + Proposed: + osnet-incorporation + incorporates system/resource-mgmt/dynamic-resource-pools + system/resource-mgmt/dynamic-resource-pools + requires new version of java not allowed by installed + java-7-incorporation""" + + first = True + while True: + tsize = len(self.__trim_dict) + res = self.__generate_dependency_closure( + possible, + excludes=excludes, + full_trim=full_trim, + filter_explicit=filter_explicit, + proposed_dict=proposed_dict, + ) + if first: + # The first pass will return the transitive + # closure of all dependencies; subsequent passes + # are only done for trimming, so need to update + # the possible set only on first pass. + possible.update(res) + first = False + + nsize = len(self.__trim_dict) + if not full_trim or nsize == tsize: + # Nothing more to trim. + break + + # Remove trimmed items from possible_set. 
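            # (Each pass can only grow __trim_dict; newly trimmed fmris are
            # dropped from 'possible' here before the next pass, and the
            # loop exits once a full pass adds no new trim entries.)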
+ possible.difference_update(six.iterkeys(self.__trim_dict)) + + def __enforce_unique_packages(self, excludes): + """Constrain the solver solution so that only one version of + each package can be installed and generate dependency clauses + for possible packages.""" + + # Generate clauses for only one version of each package, and + # for dependencies for each package. Do so for all possible + # fmris. + for name in self.__possible_dict: + self.__progress() + # Ensure only one version of a package is installed + self.__addclauses( + self.__gen_highlander_clauses(self.__possible_dict[name]) + ) + # generate dependency clauses for each pkg + for fmri in self.__possible_dict[name]: + for da in self.__get_dependency_actions( + fmri, excludes=excludes + ): + self.__addclauses(self.__gen_dependency_clauses(fmri, da)) + + def __generate_operation_clauses(self, proposed=None, proposed_dict=None): + """Generate initial solver clauses for the proposed packages (if + any) and installed packages. + + 'proposed' is a set of FMRI objects representing packages to + install or update. + + 'proposed_dict' contains user specified FMRI objects indexed by + pkg_name that should be installed or updated within an image.""" + + assert (proposed is None and proposed_dict is None) or ( + proposed is not None and proposed_dict is not None + ) + + if proposed is None: + proposed = set() + if proposed_dict is None: + proposed_dict = EmptyDict + + # Generate clauses for proposed and installed pkgs note that we + # create clauses that require one of the proposed pkgs to work; + # this allows the possible_set to always contain the existing + # pkgs. + for name in proposed_dict: + self.__progress() + self.__addclauses( + self.__gen_one_of_these_clauses( + set(proposed_dict[name]) & set(self.__possible_dict[name]) + ) + ) + + for name in ( + self.__installed_pkgs + - proposed + - self.__reject_set + - self.__avoid_set + ): + self.__progress() + + if self.__installed_dict[name] in self.__removal_fmris: + # we're uninstalling this package + continue + + if name in self.__possible_dict: + self.__addclauses( + self.__gen_one_of_these_clauses(self.__possible_dict[name]) ) - def __set_proposed_required(self, proposed_dict, excludes): - """Add the common set of conditional, group, and require - dependencies of proposed packages to the list of package stems - known to be a required part of the solution. This will improve - error messaging if no solution is found.""" - - if proposed_dict is None: - return - - req_dep_names = set() - for name in proposed_dict: - # Find intersection of the set of conditional, group, - # and require dependencies for all proposed versions of - # the proposed package. The result is the set of - # package stems we know will be required to be part of - # the solution. 
- comm_deps = None - propvers = set(self.__dotrim(proposed_dict[name])) - for f in propvers: - prop_deps = set( - dname for (dtype, dname) in ( - (da.attrs["type"], - pkg.fmri.extract_pkg_name( - da.attrs["fmri"])) - for da in self.__get_dependency_actions( - f, excludes) - if da.attrs["type"] == "conditional" or - da.attrs["type"] == "group" or - da.attrs["type"] == "require" - ) - if dtype != "group" or - (dname not in self.__avoid_set and - dname not in self.__reject_set) - ) - - if comm_deps is None: - comm_deps = prop_deps - else: - comm_deps &= prop_deps - - if comm_deps: - req_dep_names |= comm_deps - - self.__req_pkg_names = frozenset(req_dep_names | - self.__req_pkg_names) - - def __update_possible_closure(self, possible, excludes, - full_trim=False, filter_explicit=True, proposed_dict=None): - """Update the provided possible set of fmris with the transitive - closure of dependencies that can be satisfied, trimming those - packages that cannot be installed. - - 'possible' is a set of FMRI objects representing all possible - versions of packages to consider for the operation. - - 'full_trim' is an optional boolean indicating whether a full - trim of the dependency graph should be performed. This is NOT - required for the solver to find a solution. Trimming is only - needed to reduce the size of clauses and to provide error - messages. This requires multiple passes to determine if the - transitive closure of dependencies can be satisfied. This is - not required for correctness (and it greatly increases runtime). - However, it does greatly improve error messaging for some error - cases. - - 'filter_explicit' is an optional boolean indicating whether - packages with pkg.depend.explicit-install set to true will be - filtered out. - - 'proposed_dict' contains user specified FMRI objects indexed by - pkg_name that should be installed or updated within an image. - - An example of a case where full_trim will be useful (dueling - incorporations): - - Installed: - entire - incorporates java-7-incorporation - Proposed: - osnet-incorporation - incorporates system/resource-mgmt/dynamic-resource-pools - system/resource-mgmt/dynamic-resource-pools - requires new version of java not allowed by installed - java-7-incorporation""" - - first = True - while True: - tsize = len(self.__trim_dict) - res = self.__generate_dependency_closure( - possible, excludes=excludes, full_trim=full_trim, - filter_explicit=filter_explicit, - proposed_dict=proposed_dict) - if first: - # The first pass will return the transitive - # closure of all dependencies; subsequent passes - # are only done for trimming, so need to update - # the possible set only on first pass. - possible.update(res) - first = False - - nsize = len(self.__trim_dict) - if not full_trim or nsize == tsize: - # Nothing more to trim. - break - - # Remove trimmed items from possible_set. - possible.difference_update(six.iterkeys(self.__trim_dict)) - - def __enforce_unique_packages(self, excludes): - """Constrain the solver solution so that only one version of - each package can be installed and generate dependency clauses - for possible packages.""" - - # Generate clauses for only one version of each package, and - # for dependencies for each package. Do so for all possible - # fmris. 
- for name in self.__possible_dict: - self.__progress() - # Ensure only one version of a package is installed - self.__addclauses(self.__gen_highlander_clauses( - self.__possible_dict[name])) - # generate dependency clauses for each pkg - for fmri in self.__possible_dict[name]: - for da in self.__get_dependency_actions(fmri, - excludes=excludes): - self.__addclauses( - self.__gen_dependency_clauses(fmri, - da)) - - def __generate_operation_clauses(self, proposed=None, - proposed_dict=None): - """Generate initial solver clauses for the proposed packages (if - any) and installed packages. - - 'proposed' is a set of FMRI objects representing packages to - install or update. - - 'proposed_dict' contains user specified FMRI objects indexed by - pkg_name that should be installed or updated within an image.""" - - assert ((proposed is None and proposed_dict is None) or - (proposed is not None and proposed_dict is not None)) - - if proposed is None: - proposed = set() - if proposed_dict is None: - proposed_dict = EmptyDict - - # Generate clauses for proposed and installed pkgs note that we - # create clauses that require one of the proposed pkgs to work; - # this allows the possible_set to always contain the existing - # pkgs. - for name in proposed_dict: - self.__progress() - self.__addclauses( - self.__gen_one_of_these_clauses( - set(proposed_dict[name]) & - set(self.__possible_dict[name]))) - - for name in (self.__installed_pkgs - proposed - - self.__reject_set - self.__avoid_set): - self.__progress() - - if (self.__installed_dict[name] in - self.__removal_fmris): - # we're uninstalling this package - continue - - if name in self.__possible_dict: - self.__addclauses( - self.__gen_one_of_these_clauses( - self.__possible_dict[name])) - - def __begin_solve(self): - """Prepares solver for solution creation returning a - ProgressTracker object to be used for the operation.""" - - # Once solution has been returned or failure has occurred, a new - # solver must be used. - assert self.__state == SOLVER_INIT - self.__state = SOLVER_OXY - - pt = self.__progtrack - # Check to see if we were invoked by solve_uninstall, in - # which case we don't want to restart what we've already - # started. - if self.__progitem is None: - self.__progitem = pt.PLAN_SOLVE_SETUP - pt.plan_start(pt.PLAN_SOLVE_SETUP) - self.__start_subphase(1, reset=True) - - return pt - - def __end_solve(self, solution, excludes): - """Returns the solution result to the caller after completing - all necessary solution cleanup.""" - - pt = self.__progtrack - self.__end_subphase() # end the last subphase. - pt.plan_done(pt.PLAN_SOLVE_SOLVER) - return self.__cleanup((self.__elide_possible_renames(solution, - excludes), (self.__avoid_set, self.__implicit_avoid_set, - self.__obs_set))) - - def __assert_installed_allowed(self, excludes, proposed=None): - """Raises a PlanCreationException if the proposed operation - would require the removal of installed packages that are not - marked for removal by the proposed operation.""" - - if proposed is None: - proposed = set() - - uninstall_fmris = [] - for name in (self.__installed_pkgs - proposed - - self.__reject_set - self.__avoid_set): - self.__progress() - - if (self.__installed_dict[name] in - self.__removal_fmris): - # we're uninstalling this package - continue - - if name in self.__possible_dict: - continue - - # no version of this package is allowed - uninstall_fmris.append(self.__installed_dict[name]) - - # Used to de-dup errors. 
- already_seen = set() - ret = [] - msg = N_("Package '{0}' must be uninstalled or upgraded " - "if the requested operation is to be performed.") - - # First check for solver failures caused by missing parent - # dependencies. We do this because missing parent dependency - # failures cause other cascading failures, so it's better to - # just emit these failures first, have the user fix them, and - # have them re-run the operation, so then we can provide more - # concise error output about other problems. - for fmri in uninstall_fmris: - # Unused variable; pylint: disable=W0612 - for reason_id, reason_t, fmris in \ - self.__trim_dict.get(fmri, EmptyI): - if reason_id == _TRIM_PARENT_MISSING: - break - else: - continue - res = self.__fmri_list_errors([fmri], - already_seen=already_seen) - assert res - ret.extend([msg.format(fmri.pkg_name)]) - ret.extend(res) - - if ret: - self.__raise_solution_error(no_version=ret) - - for fmri in uninstall_fmris: - flist = [fmri] - if fmri in self.__linked_pkgs: - depend_self = any( - da - for da in self.__get_dependency_actions( - fmri, excludes) - if da.attrs["type"] == "parent" and - pkg.actions.depend.DEPEND_SELF in - da.attrlist("fmri") - ) - - if depend_self: - pf = self.__parent_dict.get( - fmri.pkg_name) - if pf and pf != fmri: - # include parent's version of - # parent-constrained packages in - # error messaging for clarity if - # different - flist.append(pf) - - res = self.__fmri_list_errors(flist, - already_seen=already_seen) - - # If no errors returned, that implies that all of the - # reasons the FMRI was rejected aren't interesting. - if res: - ret.extend([msg.format(fmri.pkg_name)]) - ret.extend(res) - - if ret: - self.__raise_solution_error(no_version=ret) - - def __assert_trim_errors(self, possible_set, excludes, proposed=None, - proposed_dict=None): - """Raises a PlanCreationException if any further trims would - prevent the installation or update of proposed or - installed/required packages. - - 'proposed' is an optional set of FMRI objects representing - packages to install or update. - - 'proposed_dict' contains user specified FMRIs indexed by - pkg_name that should be installed within an image. - - 'possible_set' is the set of FMRIs potentially allowed for use - in the proposed operation.""" - - # make sure all package trims appear - self.__trimdone = False - - # Ensure required dependencies of proposed packages are flagged - # to improve error messaging when parsing the transitive - # closure of all dependencies. - self.__set_proposed_required(proposed_dict, excludes) - - # First, perform a full trim of the package version space; this - # is normally skipped for performance reasons as it's not - # required for correctness. - self.__update_possible_closure(possible_set, excludes, - full_trim=True, filter_explicit=False, - proposed_dict=proposed_dict) - - # Now try re-asserting that proposed (if any) and installed - # packages are allowed after the trimming; these calls will - # raise an exception if all the proposed or any of the - # installed/required packages are trimmed. - self.__set_proposed_required(proposed_dict, excludes) - self.__trim_proposed(proposed_dict) - self.__assign_possible(possible_set) - self.__assert_installed_allowed(excludes, proposed=proposed) - - def __raise_install_error(self, exp, inc_list, proposed_dict, - possible_set, excludes): - """Private logic for solve_install() to process a - PlanCreationException and re-raise as appropriate. 
- - 'exp' is the related exception object raised by the solver when - no solution was found. - - 'inc_list' is a list of package FMRIs representing installed - incorporations that are being maintained. - - 'proposed_dict' contains user specified FMRIs indexed by - pkg_name that should be installed within an image. - - 'possible_set' is the set of FMRIs potentially allowed for use - in the proposed operation. - """ - - # Before making a guess, apply extra trimming to see if we can - # reject the operation based on changing packages. - self.__assert_trim_errors(possible_set, excludes, - proposed_dict=proposed_dict) - - # Despite all of the trimming done, we still don't know why the - # solver couldn't find a solution, so make a best effort guess - # at the reason why. - info = [] - incs = [] - - incs.append("") - if inc_list: - incs.append("maintained incorporations:") - skey = operator.attrgetter('pkg_name') - for il in sorted(inc_list, key=skey): - incs.append(" {0}".format(il.get_short_fmri())) - else: - incs.append("maintained incorporations: None") - incs.append("") - - ms = self.__generate_dependency_errors([ - b for a in proposed_dict.values() - for b in a - ], excludes=excludes) - if ms: - info.append("") - info.append(_("Plan Creation: dependency error(s) in " - "proposed packages:")) - info.append("") - for s in ms: - info.append(" {0}".format(s)) - - ms = self.__check_installed() - if ms: - info.append("") - info.append(_("Plan Creation: Errors in installed " - "packages due to proposed changes:")) - info.append("") - for s in ms: - info.append(" {0}".format(s)) - - if not info: # both error detection methods insufficent. - info.append(_("Plan Creation: Package solver is " - "unable to compute solution.")) - info.append(_("Dependency analysis is unable to " - "determine exact cause.")) - info.append(_("Try running with -vv to " - "obtain more detailed error messages.")) - exp.no_solution = incs + info - - # Value 'DebugValues' is unsubscriptable; - # pylint: disable=E1136 - if DebugValues["plan"]: - exp.solver_errors = self.get_trim_errors() - raise exp - - def add_triggered_op(self, trigger_op, exec_op, fmris): - """Add the set of FMRIs in 'fmris' to the internal dict of - pkg-actuators. 'trigger_op' is the operation which triggered - the pkg change, 'exec_op' is the operation which is supposed to - be executed.""" - - assert trigger_op in self.__triggered_ops, "{0} is " \ - "not a valid trigger op for pkg actuators".format( - trigger_op) - assert exec_op in self.__triggered_ops[trigger_op], "{0} is " \ - "not a valid execution op for pkg actuators".format(exec_op) - assert isinstance(fmris, set) - - self.__triggered_ops[trigger_op][exec_op] |= fmris - - def solve_install(self, existing_freezes, proposed_dict, - new_variants=None, excludes=EmptyI, - reject_set=frozenset(), trim_proposed_installed=True, - relax_all=False, ignore_inst_parent_deps=False, - exact_install=False, installed_dict_tmp=EmptyDict): - """Logic to install packages, change variants, and/or change - facets. - - Returns FMRIs to be installed / upgraded in system and a new - set of packages to be avoided. - - 'existing_freezes' is a list of incorp. style FMRIs that - constrain package motion. - - 'proposed_dict' contains user specified FMRIs indexed by - pkg_name that should be installed within an image. - - 'new_variants' a dictionary containing variants which are - being updated. (It should not contain existing variants which - are not changing.) 
- - 'reject_set' contains user specified package names that should - not be present within the final image. (These packages may or - may not be currently installed.) - - 'trim_proposed_installed' is a boolean indicating whether the - solver should elide versions of proposed packages older than - those installed from the set of possible solutions. If False, - package downgrades are allowed, but only for installed - packages matching those in the proposed_dict. - - 'relax_all' indicates if the solver should relax all install - holds, or only install holds specified by proposed packages. - - 'ignore_inst_parent_deps' indicates if the solver should - ignore parent dependencies for installed packages. This - allows us to modify images with unsatisfied parent - dependencies (i.e., out-of-sync images). Any packaging - operation which needs to guarantee that we have an in-sync - image (for example, sync-linked operations, or any recursive - packaging operations) should NOT enable this behavior. - - 'exact_install' is a flag to indicate whether we treat the - current image as an empty one. Any previously installed - packages that are not either specified in proposed_dict or - are a dependency (require, origin and parent dependencies) - of those packages will be removed. - - 'installed_dict_tmp' a dictionary containing the current - installed FMRIs indexed by pkg_name. Used when exact_install - is on.""" - - pt = self.__begin_solve() - - # reject_set is a frozenset(), need to make copy to modify - r_set = set(reject_set) - for f in self.__triggered_ops[PKG_OP_UNINSTALL][PKG_OP_UPDATE]: - if f.pkg_name in proposed_dict: - proposed_dict[f.pkg_name].append(f) - else: - proposed_dict[f.pkg_name] = [f] - for f in \ - self.__triggered_ops[PKG_OP_UNINSTALL][PKG_OP_UNINSTALL]: - r_set.add(f.pkg_name) - # re-freeze reject set - reject_set = frozenset(r_set) - - proposed_pkgs = set(proposed_dict) - - if new_variants: - self.__variants = new_variants - - # - # Entire packages can be tagged with variants thereby - # making those packages uninstallable in certain - # images. So if we're changing variants such that - # some currently installed packages are becoming - # uninstallable add them to the removal package set. - # - for f in self.__installed_fmris: - d = self.__get_variant_dict(f) - for k in new_variants: - if k in d and \ - new_variants[k] not in d[k]: - self.__removal_fmris |= set([f]) - - # proposed_dict already contains publisher selection logic, - # so prevent any further trimming of named packages based - # on publisher if they are installed. - for name in proposed_dict: - if name in self.__installed_dict: - self.__mark_pub_trimmed(name) - else: - self.__publisher[name] = \ - proposed_dict[name][0].publisher - - # Determine which packages are to be removed, rejected, and - # avoided and also determine which ones must not be removed - # during the operation. - self.__set_removed_and_required_packages(rejected=reject_set, - proposed=proposed_pkgs) - self.__progress() + def __begin_solve(self): + """Prepares solver for solution creation returning a + ProgressTracker object to be used for the operation.""" + + # Once solution has been returned or failure has occurred, a new + # solver must be used. + assert self.__state == SOLVER_INIT + self.__state = SOLVER_OXY + + pt = self.__progtrack + # Check to see if we were invoked by solve_uninstall, in + # which case we don't want to restart what we've already + # started. 
+ if self.__progitem is None: + self.__progitem = pt.PLAN_SOLVE_SETUP + pt.plan_start(pt.PLAN_SOLVE_SETUP) + self.__start_subphase(1, reset=True) + + return pt + + def __end_solve(self, solution, excludes): + """Returns the solution result to the caller after completing + all necessary solution cleanup.""" + + pt = self.__progtrack + self.__end_subphase() # end the last subphase. + pt.plan_done(pt.PLAN_SOLVE_SOLVER) + return self.__cleanup( + ( + self.__elide_possible_renames(solution, excludes), + (self.__avoid_set, self.__implicit_avoid_set, self.__obs_set), + ) + ) + + def __assert_installed_allowed(self, excludes, proposed=None): + """Raises a PlanCreationException if the proposed operation + would require the removal of installed packages that are not + marked for removal by the proposed operation.""" + + if proposed is None: + proposed = set() + + uninstall_fmris = [] + for name in ( + self.__installed_pkgs + - proposed + - self.__reject_set + - self.__avoid_set + ): + self.__progress() + + if self.__installed_dict[name] in self.__removal_fmris: + # we're uninstalling this package + continue + + if name in self.__possible_dict: + continue + + # no version of this package is allowed + uninstall_fmris.append(self.__installed_dict[name]) + + # Used to de-dup errors. + already_seen = set() + ret = [] + msg = N_( + "Package '{0}' must be uninstalled or upgraded " + "if the requested operation is to be performed." + ) + + # First check for solver failures caused by missing parent + # dependencies. We do this because missing parent dependency + # failures cause other cascading failures, so it's better to + # just emit these failures first, have the user fix them, and + # have them re-run the operation, so then we can provide more + # concise error output about other problems. 
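        # (The inner loop below relies on Python's for/else: the else branch
        # runs only when no _TRIM_PARENT_MISSING reason was recorded for the
        # fmri, and in that case the fmri is deferred to the more general
        # error pass that follows.)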
+ for fmri in uninstall_fmris: + # Unused variable; pylint: disable=W0612 + for reason_id, reason_t, fmris in self.__trim_dict.get( + fmri, EmptyI + ): + if reason_id == _TRIM_PARENT_MISSING: + break + else: + continue + res = self.__fmri_list_errors([fmri], already_seen=already_seen) + assert res + ret.extend([msg.format(fmri.pkg_name)]) + ret.extend(res) + + if ret: + self.__raise_solution_error(no_version=ret) + + for fmri in uninstall_fmris: + flist = [fmri] + if fmri in self.__linked_pkgs: + depend_self = any( + da + for da in self.__get_dependency_actions(fmri, excludes) + if da.attrs["type"] == "parent" + and pkg.actions.depend.DEPEND_SELF in da.attrlist("fmri") + ) - # find list of incorps we don't let change as a side effect of - # other changes; exclude any specified on command line if the - # proposed version is already installed and is not being removed - # translate proposed_dict into a set - if relax_all: - relax_pkgs = self.__installed_pkgs - else: - relax_pkgs = set( - name - for name in proposed_pkgs - if not any( - f for f in proposed_dict[name] - if len(proposed_dict[name]) == 1 and - f in (self.__installed_fmris - - self.__removal_fmris) - ) - ) - relax_pkgs |= \ - self.__installed_unsatisfied_parent_deps(excludes, - ignore_inst_parent_deps) - - inc_list, con_lists = self.__get_installed_unbound_inc_list( - relax_pkgs, excludes=excludes) - self.__inc_list = inc_list - - self.__start_subphase(2) - # generate set of possible fmris - # - # ensure existing pkgs stay installed; explicitly add in - # installed fmris in case publisher change has occurred and - # some pkgs aren't part of new publisher - possible_set = set() - self.__allowed_downgrades = set() - for f in self.__installed_fmris - self.__removal_fmris: - possible_set |= self.__comb_newer_fmris(f)[0] | set([f]) - - # Add the proposed fmris, populate self.__expl_install_dict and - # check for allowed downgrades. - self.__expl_install_dict = defaultdict(list) - for name, flist in proposed_dict.items(): - possible_set.update(flist) - for f in flist: - self.__progress() - self.__allowed_downgrades |= \ - self.__allow_incorp_downgrades(f, - excludes=excludes) - if self.__is_explicit_install(f): - self.__expl_install_dict[name].append(f) - - # For linked image sync we have to analyze all pkgs of the - # possible_set because no proposed pkgs will be given. However, - # that takes more time so only do this for syncs. The relax_all - # flag is an indicator of a sync operation. - if not proposed_dict.values() and relax_all: - for f in possible_set: - self.__progress() - self.__allowed_downgrades |= \ - self.__allow_incorp_downgrades(f, - excludes=excludes, relax_all=True) - - possible_set |= self.__allowed_downgrades - - self.__start_subphase(3) - # If requested, trim any proposed fmris older than those of - # corresponding installed packages. - candidate_fmris = self.__installed_fmris - \ - self.__removal_fmris - - for f in candidate_fmris: - self.__progress() - if not trim_proposed_installed and \ - f.pkg_name in proposed_dict: - # Don't trim versions if newest version in - # proposed dict is older than installed - # version. - verlist = proposed_dict[f.pkg_name] - if verlist[-1].version < f.version: - # Assume downgrade is intentional. 
- continue - valid_trigger = False - for tf in self.__triggered_ops[ - PKG_OP_UNINSTALL][PKG_OP_UPDATE]: - if tf.pkg_name == f.pkg_name: - self.__trim_older(tf) - valid_trigger = True - if valid_trigger: - continue - - self.__trim_older(f) - - # trim fmris we excluded via proposed_fmris - for name in proposed_dict: - self.__progress() - self.__trim(set(self.__get_catalog_fmris(name)) - - set(proposed_dict[name]), - _TRIM_PROPOSED_VER, - N_("This version excluded by specified " - "installation version")) - # trim packages excluded by incorps in proposed. - self.__trim_recursive_incorps(proposed_dict[name], - excludes, _TRIM_PROPOSED_INC) - - # Trim packages with unsatisfied parent dependencies. For any - # remaining allowable linked packages check if they are in - # relax_pkgs. (Which means that either a version of them was - # requested explicitly on the command line or a version of them - # is installed which has unsatisfied parent dependencies and - # needs to be upgraded.) In that case add the allowable - # packages to possible_linked so we can call - # __trim_recursive_incorps() on them to trim out more packages - # that may be disallowed due to synced incorporations. - if self.__is_child(): - possible_linked = defaultdict(set) - for f in possible_set.copy(): - self.__progress() - if not self.__trim_nonmatching_parents(f, - excludes, ignore_inst_parent_deps): - possible_set.remove(f) - continue - if (f in self.__linked_pkgs and - f.pkg_name in relax_pkgs): - possible_linked[f.pkg_name].add(f) - for name in possible_linked: - # calling __trim_recursive_incorps can be - # expensive so don't call it for versions except - # the one currently installed in the parent if - # it has been proposed. - if name in proposed_dict: - pf = self.__parent_dict.get(name) - possible_linked[name] -= \ - set(proposed_dict[name]) - \ - set([pf]) - if not possible_linked[name]: - continue - self.__progress() - self.__trim_recursive_incorps( - list(possible_linked[name]), - excludes, _TRIM_SYNCED_INC) - del possible_linked - - self.__start_subphase(4) - # now trim pkgs we cannot update due to maintained - # incorporations - for i, flist in zip(inc_list, con_lists): - reason = (N_("This version is excluded by installed " - "incorporation {0}"), (i.get_short_fmri( - anarchy=True, include_scheme=False),)) - self.__trim(self.__comb_auto_fmris(i)[1], - _TRIM_INSTALLED_INC, reason) - for f in flist: - # dotrim=False here as we only want to trim - # packages that don't satisfy the incorporation. - self.__trim(self.__comb_auto_fmris(f, - dotrim=False)[1], _TRIM_INSTALLED_INC, - reason) - - self.__start_subphase(5) - # now trim any pkgs we cannot update due to freezes - self.__trim_frozen(existing_freezes) - - self.__start_subphase(6) - # elide any proposed versions that don't match variants (arch - # usually) - for name in proposed_dict: - for fmri in proposed_dict[name]: - self.__trim_nonmatching_variants(fmri) - - self.__start_subphase(7) - # remove any versions from proposed_dict that are in trim_dict - try: - self.__trim_proposed(proposed_dict) - except api_errors.PlanCreationException as exp: - # One or more proposed packages have been rejected. - self.__raise_install_error(exp, inc_list, proposed_dict, - set(), excludes) - - self.__start_subphase(8) - - # Ensure required dependencies of proposed packages are flagged - # to improve error messaging when parsing the transitive - # closure of all dependencies. 
- self.__set_proposed_required(proposed_dict, excludes) - - # Update the set of possible fmris with the transitive closure - # of all dependencies. - self.__update_possible_closure(possible_set, excludes, - proposed_dict=proposed_dict) - - self.__start_subphase(9) - # trim any non-matching variants, origins or parents - for f in possible_set: - self.__progress() - if not self.__trim_nonmatching_parents(f, excludes, - ignore_inst_parent_deps): - continue - if not self.__trim_nonmatching_variants(f): - continue - self.__trim_nonmatching_origins(f, excludes, - exact_install=exact_install, - installed_dict_tmp=installed_dict_tmp) - - self.__start_subphase(10) - # remove all trimmed fmris from consideration - possible_set.difference_update(six.iterkeys(self.__trim_dict)) - # remove any versions from proposed_dict that are in trim_dict - # as trim dict has been updated w/ missing dependencies - try: - self.__trim_proposed(proposed_dict) - except api_errors.PlanCreationException as exp: - # One or more proposed packages have been rejected. - self.__raise_install_error(exp, inc_list, proposed_dict, - possible_set, excludes) - - self.__start_subphase(11) - # - # Generate ids, possible_dict for clause generation. Prepare - # the solver for invocation. - # - self.__assign_fmri_ids(possible_set) - - # Constrain the solution so that only one version of each - # package can be installed. - self.__enforce_unique_packages(excludes) - - self.__start_subphase(12) - # Add proposed and installed packages to solver. - self.__generate_operation_clauses(proposed=proposed_pkgs, - proposed_dict=proposed_dict) - try: - self.__assert_installed_allowed(excludes, - proposed=proposed_pkgs) - except api_errors.PlanCreationException as exp: - # One or more installed packages can't be retained or - # upgraded. - self.__raise_install_error(exp, inc_list, proposed_dict, - possible_set, excludes) - - pt.plan_done(pt.PLAN_SOLVE_SETUP) - - self.__progitem = pt.PLAN_SOLVE_SOLVER - pt.plan_start(pt.PLAN_SOLVE_SOLVER) - self.__start_subphase(13) - # save a solver instance so we can come back here - # this is where errors happen... - saved_solver = self.__save_solver() - try: - saved_solution = self.__solve() - except api_errors.PlanCreationException as exp: - # no solution can be found. - self.__raise_install_error(exp, inc_list, proposed_dict, - possible_set, excludes) - - self.__start_subphase(14) - # we have a solution that works... attempt to - # reduce collateral damage to other packages - # while still keeping command line pkgs at their - # optimum level - - self.__restore_solver(saved_solver) - - # fix the fmris that were specified on the cmd line - # at their optimum (newest) level along with the - # new dependencies, but try and avoid upgrading - # already installed pkgs or adding un-needed new pkgs. 
- - for fmri in saved_solution: - if fmri.pkg_name in proposed_dict: - self.__addclauses( - self.__gen_one_of_these_clauses([fmri])) - - self.__start_subphase(15) - # save context - saved_solver = self.__save_solver() - - saved_solution = self.__solve(older=True) - - self.__start_subphase(16) - # Now we have the oldest possible original fmris - # but we may have some that are not original - # Since we want to move as far forward as possible - # when we have to move a package, fix the originals - # and drive forward again w/ the remainder - self.__restore_solver(saved_solver) - - for fmri in saved_solution & self.__installed_fmris: - self.__addclauses( - self.__gen_one_of_these_clauses([fmri])) - - solution = self.__solve() + if depend_self: + pf = self.__parent_dict.get(fmri.pkg_name) + if pf and pf != fmri: + # include parent's version of + # parent-constrained packages in + # error messaging for clarity if + # different + flist.append(pf) + + res = self.__fmri_list_errors(flist, already_seen=already_seen) + + # If no errors returned, that implies that all of the + # reasons the FMRI was rejected aren't interesting. + if res: + ret.extend([msg.format(fmri.pkg_name)]) + ret.extend(res) + + if ret: + self.__raise_solution_error(no_version=ret) + + def __assert_trim_errors( + self, possible_set, excludes, proposed=None, proposed_dict=None + ): + """Raises a PlanCreationException if any further trims would + prevent the installation or update of proposed or + installed/required packages. + + 'proposed' is an optional set of FMRI objects representing + packages to install or update. + + 'proposed_dict' contains user specified FMRIs indexed by + pkg_name that should be installed within an image. + + 'possible_set' is the set of FMRIs potentially allowed for use + in the proposed operation.""" + + # make sure all package trims appear + self.__trimdone = False + + # Ensure required dependencies of proposed packages are flagged + # to improve error messaging when parsing the transitive + # closure of all dependencies. + self.__set_proposed_required(proposed_dict, excludes) + + # First, perform a full trim of the package version space; this + # is normally skipped for performance reasons as it's not + # required for correctness. + self.__update_possible_closure( + possible_set, + excludes, + full_trim=True, + filter_explicit=False, + proposed_dict=proposed_dict, + ) + + # Now try re-asserting that proposed (if any) and installed + # packages are allowed after the trimming; these calls will + # raise an exception if all the proposed or any of the + # installed/required packages are trimmed. + self.__set_proposed_required(proposed_dict, excludes) + self.__trim_proposed(proposed_dict) + self.__assign_possible(possible_set) + self.__assert_installed_allowed(excludes, proposed=proposed) + + def __raise_install_error( + self, exp, inc_list, proposed_dict, possible_set, excludes + ): + """Private logic for solve_install() to process a + PlanCreationException and re-raise as appropriate. + + 'exp' is the related exception object raised by the solver when + no solution was found. + + 'inc_list' is a list of package FMRIs representing installed + incorporations that are being maintained. + + 'proposed_dict' contains user specified FMRIs indexed by + pkg_name that should be installed within an image. + + 'possible_set' is the set of FMRIs potentially allowed for use + in the proposed operation. 
+ """ + + # Before making a guess, apply extra trimming to see if we can + # reject the operation based on changing packages. + self.__assert_trim_errors( + possible_set, excludes, proposed_dict=proposed_dict + ) + + # Despite all of the trimming done, we still don't know why the + # solver couldn't find a solution, so make a best effort guess + # at the reason why. + info = [] + incs = [] + + incs.append("") + if inc_list: + incs.append("maintained incorporations:") + skey = operator.attrgetter("pkg_name") + for il in sorted(inc_list, key=skey): + incs.append(" {0}".format(il.get_short_fmri())) + else: + incs.append("maintained incorporations: None") + incs.append("") + + ms = self.__generate_dependency_errors( + [b for a in proposed_dict.values() for b in a], excludes=excludes + ) + if ms: + info.append("") + info.append( + _("Plan Creation: dependency error(s) in " "proposed packages:") + ) + info.append("") + for s in ms: + info.append(" {0}".format(s)) + + ms = self.__check_installed() + if ms: + info.append("") + info.append( + _( + "Plan Creation: Errors in installed " + "packages due to proposed changes:" + ) + ) + info.append("") + for s in ms: + info.append(" {0}".format(s)) + + if not info: # both error detection methods insufficent. + info.append( + _( + "Plan Creation: Package solver is " + "unable to compute solution." + ) + ) + info.append( + _("Dependency analysis is unable to " "determine exact cause.") + ) + info.append( + _( + "Try running with -vv to " + "obtain more detailed error messages." + ) + ) + exp.no_solution = incs + info + + # Value 'DebugValues' is unsubscriptable; + # pylint: disable=E1136 + if DebugValues["plan"]: + exp.solver_errors = self.get_trim_errors() + raise exp + + def add_triggered_op(self, trigger_op, exec_op, fmris): + """Add the set of FMRIs in 'fmris' to the internal dict of + pkg-actuators. 'trigger_op' is the operation which triggered + the pkg change, 'exec_op' is the operation which is supposed to + be executed.""" + + assert ( + trigger_op in self.__triggered_ops + ), "{0} is " "not a valid trigger op for pkg actuators".format( + trigger_op + ) + assert ( + exec_op in self.__triggered_ops[trigger_op] + ), "{0} is " "not a valid execution op for pkg actuators".format( + exec_op + ) + assert isinstance(fmris, set) + + self.__triggered_ops[trigger_op][exec_op] |= fmris + + def solve_install( + self, + existing_freezes, + proposed_dict, + new_variants=None, + excludes=EmptyI, + reject_set=frozenset(), + trim_proposed_installed=True, + relax_all=False, + ignore_inst_parent_deps=False, + exact_install=False, + installed_dict_tmp=EmptyDict, + ): + """Logic to install packages, change variants, and/or change + facets. + + Returns FMRIs to be installed / upgraded in system and a new + set of packages to be avoided. + + 'existing_freezes' is a list of incorp. style FMRIs that + constrain package motion. + + 'proposed_dict' contains user specified FMRIs indexed by + pkg_name that should be installed within an image. + + 'new_variants' a dictionary containing variants which are + being updated. (It should not contain existing variants which + are not changing.) + + 'reject_set' contains user specified package names that should + not be present within the final image. (These packages may or + may not be currently installed.) + + 'trim_proposed_installed' is a boolean indicating whether the + solver should elide versions of proposed packages older than + those installed from the set of possible solutions. 
If False, + package downgrades are allowed, but only for installed + packages matching those in the proposed_dict. + + 'relax_all' indicates if the solver should relax all install + holds, or only install holds specified by proposed packages. + + 'ignore_inst_parent_deps' indicates if the solver should + ignore parent dependencies for installed packages. This + allows us to modify images with unsatisfied parent + dependencies (i.e., out-of-sync images). Any packaging + operation which needs to guarantee that we have an in-sync + image (for example, sync-linked operations, or any recursive + packaging operations) should NOT enable this behavior. + + 'exact_install' is a flag to indicate whether we treat the + current image as an empty one. Any previously installed + packages that are not either specified in proposed_dict or + are a dependency (require, origin and parent dependencies) + of those packages will be removed. + + 'installed_dict_tmp' a dictionary containing the current + installed FMRIs indexed by pkg_name. Used when exact_install + is on.""" + + pt = self.__begin_solve() + + # reject_set is a frozenset(), need to make copy to modify + r_set = set(reject_set) + for f in self.__triggered_ops[PKG_OP_UNINSTALL][PKG_OP_UPDATE]: + if f.pkg_name in proposed_dict: + proposed_dict[f.pkg_name].append(f) + else: + proposed_dict[f.pkg_name] = [f] + for f in self.__triggered_ops[PKG_OP_UNINSTALL][PKG_OP_UNINSTALL]: + r_set.add(f.pkg_name) + # re-freeze reject set + reject_set = frozenset(r_set) + + proposed_pkgs = set(proposed_dict) + + if new_variants: + self.__variants = new_variants + + # + # Entire packages can be tagged with variants thereby + # making those packages uninstallable in certain + # images. So if we're changing variants such that + # some currently installed packages are becoming + # uninstallable add them to the removal package set. + # + for f in self.__installed_fmris: + d = self.__get_variant_dict(f) + for k in new_variants: + if k in d and new_variants[k] not in d[k]: + self.__removal_fmris |= set([f]) + + # proposed_dict already contains publisher selection logic, + # so prevent any further trimming of named packages based + # on publisher if they are installed. + for name in proposed_dict: + if name in self.__installed_dict: + self.__mark_pub_trimmed(name) + else: + self.__publisher[name] = proposed_dict[name][0].publisher + + # Determine which packages are to be removed, rejected, and + # avoided and also determine which ones must not be removed + # during the operation. 
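        # Illustrative sketch (hypothetical stems): with
        # reject_set = {"web/server/apache-22"} and that stem installed, its
        # installed FMRI joins __removal_fmris, every catalog version of it
        # is trimmed with _TRIM_REJECT, and __req_pkg_names becomes the
        # remaining installed stems plus the proposed ones.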
+ self.__set_removed_and_required_packages( + rejected=reject_set, proposed=proposed_pkgs + ) + self.__progress() + + # find list of incorps we don't let change as a side effect of + # other changes; exclude any specified on command line if the + # proposed version is already installed and is not being removed + # translate proposed_dict into a set + if relax_all: + relax_pkgs = self.__installed_pkgs + else: + relax_pkgs = set( + name + for name in proposed_pkgs + if not any( + f + for f in proposed_dict[name] + if len(proposed_dict[name]) == 1 + and f in (self.__installed_fmris - self.__removal_fmris) + ) + ) + relax_pkgs |= self.__installed_unsatisfied_parent_deps( + excludes, ignore_inst_parent_deps + ) + + inc_list, con_lists = self.__get_installed_unbound_inc_list( + relax_pkgs, excludes=excludes + ) + self.__inc_list = inc_list + + self.__start_subphase(2) + # generate set of possible fmris + # + # ensure existing pkgs stay installed; explicitly add in + # installed fmris in case publisher change has occurred and + # some pkgs aren't part of new publisher + possible_set = set() + self.__allowed_downgrades = set() + for f in self.__installed_fmris - self.__removal_fmris: + possible_set |= self.__comb_newer_fmris(f)[0] | set([f]) + + # Add the proposed fmris, populate self.__expl_install_dict and + # check for allowed downgrades. + self.__expl_install_dict = defaultdict(list) + for name, flist in proposed_dict.items(): + possible_set.update(flist) + for f in flist: self.__progress() - solution = self.__update_solution_set(solution, excludes) - - return self.__end_solve(solution, excludes) - - def solve_update_all(self, existing_freezes, excludes=EmptyI, - reject_set=frozenset()): - """Logic to update all packages within an image to the latest - versions possible. - - Returns FMRIs to be installed / upgraded in system and a new - set of packages to be avoided. - - 'existing_freezes' is a list of incorp. style FMRIs that - constrain pkg motion - - 'reject_set' contains user specified FMRIs that should not be - present within the final image. (These packages may or may - not be currently installed.) - """ - - pt = self.__begin_solve() + self.__allowed_downgrades |= self.__allow_incorp_downgrades( + f, excludes=excludes + ) + if self.__is_explicit_install(f): + self.__expl_install_dict[name].append(f) + + # For linked image sync we have to analyze all pkgs of the + # possible_set because no proposed pkgs will be given. However, + # that takes more time so only do this for syncs. The relax_all + # flag is an indicator of a sync operation. + if not proposed_dict.values() and relax_all: + for f in possible_set: + self.__progress() + self.__allowed_downgrades |= self.__allow_incorp_downgrades( + f, excludes=excludes, relax_all=True + ) - # Determine which packages are to be removed, rejected, and - # avoided and also determine which ones must not be removed - # during the operation. - self.__set_removed_and_required_packages(rejected=reject_set) + possible_set |= self.__allowed_downgrades + + self.__start_subphase(3) + # If requested, trim any proposed fmris older than those of + # corresponding installed packages. + candidate_fmris = self.__installed_fmris - self.__removal_fmris + + for f in candidate_fmris: + self.__progress() + if not trim_proposed_installed and f.pkg_name in proposed_dict: + # Don't trim versions if newest version in + # proposed dict is older than installed + # version. 
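                # (For instance, with trim_proposed_installed=False, an
                # installed foo@2.0 whose proposed versions top out at
                # foo@1.0 is treated as an intentional downgrade, so
                # __trim_older() is skipped for it below.)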
+ verlist = proposed_dict[f.pkg_name] + if verlist[-1].version < f.version: + # Assume downgrade is intentional. + continue + valid_trigger = False + for tf in self.__triggered_ops[PKG_OP_UNINSTALL][PKG_OP_UPDATE]: + if tf.pkg_name == f.pkg_name: + self.__trim_older(tf) + valid_trigger = True + if valid_trigger: + continue + + self.__trim_older(f) + + # trim fmris we excluded via proposed_fmris + for name in proposed_dict: + self.__progress() + self.__trim( + set(self.__get_catalog_fmris(name)) - set(proposed_dict[name]), + _TRIM_PROPOSED_VER, + N_( + "This version excluded by specified " "installation version" + ), + ) + # trim packages excluded by incorps in proposed. + self.__trim_recursive_incorps( + proposed_dict[name], excludes, _TRIM_PROPOSED_INC + ) + + # Trim packages with unsatisfied parent dependencies. For any + # remaining allowable linked packages check if they are in + # relax_pkgs. (Which means that either a version of them was + # requested explicitly on the command line or a version of them + # is installed which has unsatisfied parent dependencies and + # needs to be upgraded.) In that case add the allowable + # packages to possible_linked so we can call + # __trim_recursive_incorps() on them to trim out more packages + # that may be disallowed due to synced incorporations. + if self.__is_child(): + possible_linked = defaultdict(set) + for f in possible_set.copy(): self.__progress() + if not self.__trim_nonmatching_parents( + f, excludes, ignore_inst_parent_deps + ): + possible_set.remove(f) + continue + if f in self.__linked_pkgs and f.pkg_name in relax_pkgs: + possible_linked[f.pkg_name].add(f) + for name in possible_linked: + # calling __trim_recursive_incorps can be + # expensive so don't call it for versions except + # the one currently installed in the parent if + # it has been proposed. + if name in proposed_dict: + pf = self.__parent_dict.get(name) + possible_linked[name] -= set(proposed_dict[name]) - set( + [pf] + ) + if not possible_linked[name]: + continue + self.__progress() + self.__trim_recursive_incorps( + list(possible_linked[name]), excludes, _TRIM_SYNCED_INC + ) + del possible_linked + + self.__start_subphase(4) + # now trim pkgs we cannot update due to maintained + # incorporations + for i, flist in zip(inc_list, con_lists): + reason = ( + N_( + "This version is excluded by installed " "incorporation {0}" + ), + (i.get_short_fmri(anarchy=True, include_scheme=False),), + ) + self.__trim( + self.__comb_auto_fmris(i)[1], _TRIM_INSTALLED_INC, reason + ) + for f in flist: + # dotrim=False here as we only want to trim + # packages that don't satisfy the incorporation. + self.__trim( + self.__comb_auto_fmris(f, dotrim=False)[1], + _TRIM_INSTALLED_INC, + reason, + ) - if self.__is_child(): - synced_parent_pkgs = \ - self.__installed_unsatisfied_parent_deps(excludes, - False) - else: - synced_parent_pkgs = frozenset() - - self.__start_subphase(2) - # generate set of possible fmris - possible_set = set() - for f in self.__installed_fmris - self.__removal_fmris: - self.__progress() - matching = self.__comb_newer_fmris(f)[0] - if not matching: # disabled publisher... 
- matching = set([f]) # staying put is an option - possible_set |= matching - - self.__allowed_downgrades = set() - for f in possible_set: - self.__allowed_downgrades |= \ - self.__allow_incorp_downgrades(f, excludes=excludes, - relax_all=True) - possible_set |= self.__allowed_downgrades - - # trim fmris we cannot install because they're older - for f in self.__installed_fmris: - self.__progress() - self.__trim_older(f) - - # now trim any pkgs we cannot update due to freezes - self.__trim_frozen(existing_freezes) - - # Trim packages with unsatisfied parent dependencies. Then - # for packages with satisfied parent dependenices (which will - # include incorporations), call __trim_recursive_incorps() to - # trim out more packages that are disallowed due to the synced - # incorporations. - if self.__is_child(): - possible_linked = defaultdict(set) - for f in possible_set.copy(): - self.__progress() - if not self.__trim_nonmatching_parents(f, - excludes): - possible_set.remove(f) - continue - if (f in self.__linked_pkgs and - f.pkg_name not in synced_parent_pkgs): - possible_linked[f.pkg_name].add(f) - for name in possible_linked: - self.__progress() - self.__trim_recursive_incorps( - list(possible_linked[name]), excludes, - _TRIM_SYNCED_INC) - del possible_linked - - self.__start_subphase(3) - # Update the set of possible FMRIs with the transitive closure - # of all dependencies. - self.__update_possible_closure(possible_set, excludes) - - # trim any non-matching origins or parents - for f in possible_set: - if self.__trim_nonmatching_parents(f, excludes): - if self.__trim_nonmatching_variants(f): - self.__trim_nonmatching_origins(f, - excludes) - - self.__start_subphase(4) - - # remove all trimmed fmris from consideration - possible_set.difference_update(six.iterkeys(self.__trim_dict)) - - # - # Generate ids, possible_dict for clause generation. Prepare - # the solver for invocation. - # - self.__assign_fmri_ids(possible_set) - - # Constrain the solution so that only one version of each - # package can be installed. - self.__enforce_unique_packages(excludes) - - self.__start_subphase(5) - # Add installed packages to solver. - self.__generate_operation_clauses() - try: - self.__assert_installed_allowed(excludes) - except api_errors.PlanCreationException: - # Attempt a full trim to see if we can raise a sensible - # error. If not, re-raise. - self.__assert_trim_errors(possible_set, excludes) - raise - - pt.plan_done(pt.PLAN_SOLVE_SETUP) - - self.__progitem = pt.PLAN_SOLVE_SOLVER - pt.plan_start(pt.PLAN_SOLVE_SOLVER) - self.__start_subphase(6) - try: - solution = self.__solve() - except api_errors.PlanCreationException: - # No solution can be found; attempt a full trim to see - # if we can raise a sensible error. If not, re-raise. - self.__assert_trim_errors(possible_set, excludes) - raise - - self.__update_solution_set(solution, excludes) - - for f in solution.copy(): - if self.__fmri_is_obsolete(f): - solution.remove(f) - - # If solution doesn't match installed set of packages, then an - # upgrade solution was found (heuristic): - if solution != self.__installed_fmris: - return self.__end_solve(solution, excludes) - - incorps = self.__get_installed_upgradeable_incorps( - excludes) - if not incorps or self.__is_child(): - # If there are no installed, upgradeable incorporations, - # then assume that no updates were available. Also if - # we're a linked image child we may not be able to - # update to the latest available incorporations due to - # parent constraints, so don't generate an error. 
- return self.__end_solve(solution, excludes) - - # Before making a guess, apply extra trimming to see if we can - # reject the operation based on changing packages. - self.__assert_trim_errors(possible_set, excludes) - - # Despite all of the trimming done, we still don't know why the - # solver couldn't find a solution, so make a best-effort guess - # at the reason why. - skey = operator.attrgetter('pkg_name') - info = [] - info.append(_("No solution found to update to latest available " - "versions.")) - info.append(_("This may indicate an overly constrained set of " - "packages are installed.")) - info.append(" ") - info.append(_("latest incorporations:")) - info.append(" ") - info.extend(( - " {0}".format(f) - for f in sorted(incorps, key=skey) - )) - info.append(" ") - - ms = self.__generate_dependency_errors(incorps, - excludes=excludes) - ms.extend(self.__check_installed()) - - if ms: - info.append(_("The following indicates why the system " - "cannot update to the latest version:")) - info.append(" ") - for s in ms: - info.append(" {0}".format(s)) + self.__start_subphase(5) + # now trim any pkgs we cannot update due to freezes + self.__trim_frozen(existing_freezes) + + self.__start_subphase(6) + # elide any proposed versions that don't match variants (arch + # usually) + for name in proposed_dict: + for fmri in proposed_dict[name]: + self.__trim_nonmatching_variants(fmri) + + self.__start_subphase(7) + # remove any versions from proposed_dict that are in trim_dict + try: + self.__trim_proposed(proposed_dict) + except api_errors.PlanCreationException as exp: + # One or more proposed packages have been rejected. + self.__raise_install_error( + exp, inc_list, proposed_dict, set(), excludes + ) + + self.__start_subphase(8) + + # Ensure required dependencies of proposed packages are flagged + # to improve error messaging when parsing the transitive + # closure of all dependencies. + self.__set_proposed_required(proposed_dict, excludes) + + # Update the set of possible fmris with the transitive closure + # of all dependencies. + self.__update_possible_closure( + possible_set, excludes, proposed_dict=proposed_dict + ) + + self.__start_subphase(9) + # trim any non-matching variants, origins or parents + for f in possible_set: + self.__progress() + if not self.__trim_nonmatching_parents( + f, excludes, ignore_inst_parent_deps + ): + continue + if not self.__trim_nonmatching_variants(f): + continue + self.__trim_nonmatching_origins( + f, + excludes, + exact_install=exact_install, + installed_dict_tmp=installed_dict_tmp, + ) + + self.__start_subphase(10) + # remove all trimmed fmris from consideration + possible_set.difference_update(six.iterkeys(self.__trim_dict)) + # remove any versions from proposed_dict that are in trim_dict + # as trim dict has been updated w/ missing dependencies + try: + self.__trim_proposed(proposed_dict) + except api_errors.PlanCreationException as exp: + # One or more proposed packages have been rejected. + self.__raise_install_error( + exp, inc_list, proposed_dict, possible_set, excludes + ) + + self.__start_subphase(11) + # + # Generate ids, possible_dict for clause generation. Prepare + # the solver for invocation. + # + self.__assign_fmri_ids(possible_set) + + # Constrain the solution so that only one version of each + # package can be installed. + self.__enforce_unique_packages(excludes) + + self.__start_subphase(12) + # Add proposed and installed packages to solver. 
+ self.__generate_operation_clauses( + proposed=proposed_pkgs, proposed_dict=proposed_dict + ) + try: + self.__assert_installed_allowed(excludes, proposed=proposed_pkgs) + except api_errors.PlanCreationException as exp: + # One or more installed packages can't be retained or + # upgraded. + self.__raise_install_error( + exp, inc_list, proposed_dict, possible_set, excludes + ) + + pt.plan_done(pt.PLAN_SOLVE_SETUP) + + self.__progitem = pt.PLAN_SOLVE_SOLVER + pt.plan_start(pt.PLAN_SOLVE_SOLVER) + self.__start_subphase(13) + # save a solver instance so we can come back here + # this is where errors happen... + saved_solver = self.__save_solver() + try: + saved_solution = self.__solve() + except api_errors.PlanCreationException as exp: + # no solution can be found. + self.__raise_install_error( + exp, inc_list, proposed_dict, possible_set, excludes + ) + + self.__start_subphase(14) + # we have a solution that works... attempt to + # reduce collateral damage to other packages + # while still keeping command line pkgs at their + # optimum level + + self.__restore_solver(saved_solver) + + # fix the fmris that were specified on the cmd line + # at their optimum (newest) level along with the + # new dependencies, but try and avoid upgrading + # already installed pkgs or adding un-needed new pkgs. + + for fmri in saved_solution: + if fmri.pkg_name in proposed_dict: + self.__addclauses(self.__gen_one_of_these_clauses([fmri])) + + self.__start_subphase(15) + # save context + saved_solver = self.__save_solver() + + saved_solution = self.__solve(older=True) + + self.__start_subphase(16) + # Now we have the oldest possible original fmris + # but we may have some that are not original + # Since we want to move as far forward as possible + # when we have to move a package, fix the originals + # and drive forward again w/ the remainder + self.__restore_solver(saved_solver) + + for fmri in saved_solution & self.__installed_fmris: + self.__addclauses(self.__gen_one_of_these_clauses([fmri])) + + solution = self.__solve() + self.__progress() + solution = self.__update_solution_set(solution, excludes) + + return self.__end_solve(solution, excludes) + + def solve_update_all( + self, existing_freezes, excludes=EmptyI, reject_set=frozenset() + ): + """Logic to update all packages within an image to the latest + versions possible. + + Returns FMRIs to be installed / upgraded in system and a new + set of packages to be avoided. + + 'existing_freezes' is a list of incorp. style FMRIs that + constrain pkg motion + + 'reject_set' contains user specified FMRIs that should not be + present within the final image. (These packages may or may + not be currently installed.) + """ + + pt = self.__begin_solve() + + # Determine which packages are to be removed, rejected, and + # avoided and also determine which ones must not be removed + # during the operation. + self.__set_removed_and_required_packages(rejected=reject_set) + self.__progress() + + if self.__is_child(): + synced_parent_pkgs = self.__installed_unsatisfied_parent_deps( + excludes, False + ) + else: + synced_parent_pkgs = frozenset() + + self.__start_subphase(2) + # generate set of possible fmris + possible_set = set() + for f in self.__installed_fmris - self.__removal_fmris: + self.__progress() + matching = self.__comb_newer_fmris(f)[0] + if not matching: # disabled publisher... 
+ matching = set([f]) # staying put is an option + possible_set |= matching + + self.__allowed_downgrades = set() + for f in possible_set: + self.__allowed_downgrades |= self.__allow_incorp_downgrades( + f, excludes=excludes, relax_all=True + ) + possible_set |= self.__allowed_downgrades + + # trim fmris we cannot install because they're older + for f in self.__installed_fmris: + self.__progress() + self.__trim_older(f) + + # now trim any pkgs we cannot update due to freezes + self.__trim_frozen(existing_freezes) + + # Trim packages with unsatisfied parent dependencies. Then + # for packages with satisfied parent dependenices (which will + # include incorporations), call __trim_recursive_incorps() to + # trim out more packages that are disallowed due to the synced + # incorporations. + if self.__is_child(): + possible_linked = defaultdict(set) + for f in possible_set.copy(): + self.__progress() + if not self.__trim_nonmatching_parents(f, excludes): + possible_set.remove(f) + continue + if ( + f in self.__linked_pkgs + and f.pkg_name not in synced_parent_pkgs + ): + possible_linked[f.pkg_name].add(f) + for name in possible_linked: + self.__progress() + self.__trim_recursive_incorps( + list(possible_linked[name]), excludes, _TRIM_SYNCED_INC + ) + del possible_linked + + self.__start_subphase(3) + # Update the set of possible FMRIs with the transitive closure + # of all dependencies. + self.__update_possible_closure(possible_set, excludes) + + # trim any non-matching origins or parents + for f in possible_set: + if self.__trim_nonmatching_parents(f, excludes): + if self.__trim_nonmatching_variants(f): + self.__trim_nonmatching_origins(f, excludes) + + self.__start_subphase(4) + + # remove all trimmed fmris from consideration + possible_set.difference_update(six.iterkeys(self.__trim_dict)) + + # + # Generate ids, possible_dict for clause generation. Prepare + # the solver for invocation. + # + self.__assign_fmri_ids(possible_set) + + # Constrain the solution so that only one version of each + # package can be installed. + self.__enforce_unique_packages(excludes) + + self.__start_subphase(5) + # Add installed packages to solver. + self.__generate_operation_clauses() + try: + self.__assert_installed_allowed(excludes) + except api_errors.PlanCreationException: + # Attempt a full trim to see if we can raise a sensible + # error. If not, re-raise. + self.__assert_trim_errors(possible_set, excludes) + raise + + pt.plan_done(pt.PLAN_SOLVE_SETUP) + + self.__progitem = pt.PLAN_SOLVE_SOLVER + pt.plan_start(pt.PLAN_SOLVE_SOLVER) + self.__start_subphase(6) + try: + solution = self.__solve() + except api_errors.PlanCreationException: + # No solution can be found; attempt a full trim to see + # if we can raise a sensible error. If not, re-raise. + self.__assert_trim_errors(possible_set, excludes) + raise + + self.__update_solution_set(solution, excludes) + + for f in solution.copy(): + if self.__fmri_is_obsolete(f): + solution.remove(f) + + # If solution doesn't match installed set of packages, then an + # upgrade solution was found (heuristic): + if solution != self.__installed_fmris: + return self.__end_solve(solution, excludes) + + incorps = self.__get_installed_upgradeable_incorps(excludes) + if not incorps or self.__is_child(): + # If there are no installed, upgradeable incorporations, + # then assume that no updates were available. Also if + # we're a linked image child we may not be able to + # update to the latest available incorporations due to + # parent constraints, so don't generate an error. 
+ return self.__end_solve(solution, excludes) + + # Before making a guess, apply extra trimming to see if we can + # reject the operation based on changing packages. + self.__assert_trim_errors(possible_set, excludes) + + # Despite all of the trimming done, we still don't know why the + # solver couldn't find a solution, so make a best-effort guess + # at the reason why. + skey = operator.attrgetter("pkg_name") + info = [] + info.append( + _("No solution found to update to latest available " "versions.") + ) + info.append( + _( + "This may indicate an overly constrained set of " + "packages are installed." + ) + ) + info.append(" ") + info.append(_("latest incorporations:")) + info.append(" ") + info.extend((" {0}".format(f) for f in sorted(incorps, key=skey))) + info.append(" ") + + ms = self.__generate_dependency_errors(incorps, excludes=excludes) + ms.extend(self.__check_installed()) + + if ms: + info.append( + _( + "The following indicates why the system " + "cannot update to the latest version:" + ) + ) + info.append(" ") + for s in ms: + info.append(" {0}".format(s)) + else: + info.append( + _("Dependency analysis is unable to " "determine the cause.") + ) + info.append( + _( + "Try running with -vv to " + "obtain more detailed error messages." + ) + ) + + self.__raise_solution_error(no_solution=info) + + def solve_uninstall( + self, + existing_freezes, + uninstall_list, + excludes, + ignore_inst_parent_deps=False, + ): + """Compute changes needed for uninstall""" + + self.__begin_solve() + + # generate list of installed pkgs w/ possible renames removed to + # forestall failing removal due to presence of unneeded renamed + # pkg + orig_installed_set = self.__installed_fmris + renamed_set = orig_installed_set - self.__elide_possible_renames( + orig_installed_set, excludes + ) + + proposed_removals = ( + set(uninstall_list) + | renamed_set + | self.__triggered_ops[PKG_OP_UNINSTALL][PKG_OP_UNINSTALL] + ) + + # find pkgs which are going to be installed/updated + triggered_set = set() + for f in self.__triggered_ops[PKG_OP_UNINSTALL][PKG_OP_UPDATE]: + triggered_set.add(f) + + # check for dependents + for pfmri in proposed_removals: + self.__progress() + dependents = ( + self.__get_dependents(pfmri, excludes) - proposed_removals + ) + + # Check if any of the dependents are going to be updated + # to a different version which might not have the same + # dependency constraints. If so, remove from dependents + # list. + + # Example: + # A@1 depends on B + # A@2 does not depend on B + # + # A@1 is currently installed, B is requested for removal + # -> not allowed + # pkg actuator updates A to 2 + # -> now removal of B is allowed + candidates = dict( + (tf, f) + for f in dependents + for tf in triggered_set + if f.pkg_name == tf.pkg_name + ) + + for tf in candidates: + remove = True + for da in self.__get_dependency_actions(tf, excludes): + if da.attrs["type"] != "require": + continue + pkg_name = pkg.fmri.PkgFmri(da.attrs["fmri"]).pkg_name + if pkg_name == pfmri.pkg_name: + remove = False + break + if remove: + dependents.remove(candidates[tf]) + + if dependents: + raise api_errors.NonLeafPackageException(pfmri, dependents) + + reject_set = set(f.pkg_name for f in proposed_removals) + + # Run it through the solver; with more complex dependencies + # we're going to be out of luck without it. 
+ self.__state = SOLVER_INIT # reset to initial state + return self.solve_install( + existing_freezes, + {}, + excludes=excludes, + reject_set=reject_set, + ignore_inst_parent_deps=ignore_inst_parent_deps, + ) + + def __update_solution_set(self, solution, excludes): + """Update avoid sets w/ any missing packages (due to reject). + Remove obsolete packages from solution. Keep track of which + obsolete packages have group dependencies so verify of group + packages w/ obsolete members works.""" + + solution_stems = set(f.pkg_name for f in solution) + tracked_stems = set() + for fmri in solution: + for a in self.__get_dependency_actions( + fmri, excludes=excludes, trim_invalid=False + ): + if ( + a.attrs["type"] != "group" + and a.attrs["type"] != "group-any" + ): + continue + + for t in a.attrlist("fmri"): + try: + tmp = self.__fmridict[t] + except KeyError: + tmp = pkg.fmri.PkgFmri(t) + self.__fmridict[t] = tmp + tracked_stems.add(tmp.pkg_name) + + avoided = tracked_stems - solution_stems + # Add stems omitted by solution and explicitly rejected. + self.__avoid_set |= avoided & self.__reject_set + + ret = solution.copy() + obs = set() + + for f in solution: + if self.__fmri_is_obsolete(f): + ret.remove(f) + obs.add(f.pkg_name) + + self.__obs_set = obs & tracked_stems + + # Add stems omitted by solution but not explicitly rejected, not + # previously avoided, and not avoided due to obsoletion. + self.__implicit_avoid_set |= avoided - self.__avoid_set - self.__obs_set + + return ret + + def __save_solver(self): + """Duplicate current current solver state and return it.""" + return (self.__addclause_failure, pkg.solver.msat_solver(self.__solver)) + + def __restore_solver(self, solver): + """Set the current solver state to the previously saved one""" + self.__addclause_failure, self.__solver = solver + self.__iterations = 0 + + def __solve(self, older=False, max_iterations=2000): + """Perform iterative solution; try for newest pkgs unless + older=True""" + solution_vector = [] + self.__state = SOLVER_FAIL + eliminated = set() + while not self.__addclause_failure and self.__solver.solve([]): + self.__progress() + self.__iterations += 1 + + if self.__iterations > max_iterations: + break + + solution_vector = self.__get_solution_vector() + if not solution_vector: + break + + # prevent the selection of any older pkgs except for + # those that are part of the set of allowed downgrades; + for fid in solution_vector: + pfmri = self.__getfmri(fid) + matching, remaining = self.__comb_newer_fmris(pfmri) + if not older: + # without subtraction of allowed + # downgrades, an initial solution will + # exclude any solutions containing + # earlier versions of downgradeable + # packages + remove = remaining - self.__allowed_downgrades else: - info.append(_("Dependency analysis is unable to " - "determine the cause.")) - info.append(_("Try running with -vv to " - "obtain more detailed error messages.")) - - self.__raise_solution_error(no_solution=info) - - def solve_uninstall(self, existing_freezes, uninstall_list, excludes, - ignore_inst_parent_deps=False): - """Compute changes needed for uninstall""" - - self.__begin_solve() - - # generate list of installed pkgs w/ possible renames removed to - # forestall failing removal due to presence of unneeded renamed - # pkg - orig_installed_set = self.__installed_fmris - renamed_set = orig_installed_set - \ - self.__elide_possible_renames(orig_installed_set, excludes) - - proposed_removals = set(uninstall_list) | renamed_set | \ - 
self.__triggered_ops[PKG_OP_UNINSTALL][PKG_OP_UNINSTALL] - - # find pkgs which are going to be installed/updated - triggered_set = set() - for f in self.__triggered_ops[PKG_OP_UNINSTALL][PKG_OP_UPDATE]: - triggered_set.add(f) - - # check for dependents - for pfmri in proposed_removals: - self.__progress() - dependents = self.__get_dependents(pfmri, excludes) - \ - proposed_removals - - # Check if any of the dependents are going to be updated - # to a different version which might not have the same - # dependency constraints. If so, remove from dependents - # list. - - # Example: - # A@1 depends on B - # A@2 does not depend on B - # - # A@1 is currently installed, B is requested for removal - # -> not allowed - # pkg actuator updates A to 2 - # -> now removal of B is allowed - candidates = dict( - (tf, f) - for f in dependents - for tf in triggered_set - if f.pkg_name == tf.pkg_name - ) - - for tf in candidates: - remove = True - for da in self.__get_dependency_actions(tf, - excludes): - if da.attrs["type"] != "require": - continue - pkg_name = pkg.fmri.PkgFmri( - da.attrs["fmri"]).pkg_name - if pkg_name == pfmri.pkg_name: - remove = False - break - if remove: - dependents.remove(candidates[tf]) - - if dependents: - raise api_errors.NonLeafPackageException(pfmri, - dependents) - - reject_set = set(f.pkg_name for f in proposed_removals) - - # Run it through the solver; with more complex dependencies - # we're going to be out of luck without it. - self.__state = SOLVER_INIT # reset to initial state - return self.solve_install(existing_freezes, {}, - excludes=excludes, reject_set=reject_set, - ignore_inst_parent_deps=ignore_inst_parent_deps) - - def __update_solution_set(self, solution, excludes): - """Update avoid sets w/ any missing packages (due to reject). - Remove obsolete packages from solution. Keep track of which - obsolete packages have group dependencies so verify of group - packages w/ obsolete members works.""" - - solution_stems = set(f.pkg_name for f in solution) - tracked_stems = set() - for fmri in solution: - for a in self.__get_dependency_actions(fmri, - excludes=excludes, trim_invalid=False): - if (a.attrs["type"] != "group" and - a.attrs["type"] != "group-any"): - continue - - for t in a.attrlist("fmri"): - try: - tmp = self.__fmridict[t] - except KeyError: - tmp = pkg.fmri.PkgFmri(t) - self.__fmridict[t] = tmp - tracked_stems.add(tmp.pkg_name) - - avoided = (tracked_stems - solution_stems) - # Add stems omitted by solution and explicitly rejected. - self.__avoid_set |= avoided & self.__reject_set - - ret = solution.copy() - obs = set() - - for f in solution: - if self.__fmri_is_obsolete(f): - ret.remove(f) - obs.add(f.pkg_name) - - self.__obs_set = obs & tracked_stems - - # Add stems omitted by solution but not explicitly rejected, not - # previously avoided, and not avoided due to obsoletion. 
- self.__implicit_avoid_set |= avoided - self.__avoid_set - \ - self.__obs_set - - return ret - - def __save_solver(self): - """Duplicate current current solver state and return it.""" - return (self.__addclause_failure, - pkg.solver.msat_solver(self.__solver)) - - def __restore_solver(self, solver): - """Set the current solver state to the previously saved one""" - self.__addclause_failure, self.__solver = solver - self.__iterations = 0 - - def __solve(self, older=False, max_iterations=2000): - """Perform iterative solution; try for newest pkgs unless - older=True""" - solution_vector = [] - self.__state = SOLVER_FAIL - eliminated = set() - while not self.__addclause_failure and self.__solver.solve([]): - self.__progress() - self.__iterations += 1 - - if self.__iterations > max_iterations: - break - - solution_vector = self.__get_solution_vector() - if not solution_vector: - break - - # prevent the selection of any older pkgs except for - # those that are part of the set of allowed downgrades; - for fid in solution_vector: - pfmri = self.__getfmri(fid) - matching, remaining = \ - self.__comb_newer_fmris(pfmri) - if not older: - # without subtraction of allowed - # downgrades, an initial solution will - # exclude any solutions containing - # earlier versions of downgradeable - # packages - remove = remaining - \ - self.__allowed_downgrades - else: - remove = matching - set([pfmri]) - \ - eliminated - for f in remove: - self.__addclauses([[-self.__getid(f)]]) - - - # prevent the selection of this exact combo; - # permit [] solution - self.__addclauses([[-i for i in solution_vector]]) - - if not self.__iterations: - self.__raise_solution_error(no_solution=True) - - self.__state = SOLVER_SUCCESS - - solution = set([self.__getfmri(i) for i in solution_vector]) - - return solution - - def __get_solution_vector(self): - """Return solution vector from solver""" - return frozenset([ - (i + 1) for i in range(self.__solver.get_variables()) - if self.__solver.dereference(i) - ]) - - def __assign_possible(self, possible_set): - """Assign __possible_dict of possible package FMRIs by pkg stem - and mark trimming complete.""" - - # generate dictionary of possible pkgs fmris by pkg stem - self.__possible_dict.clear() - - for f in possible_set: - self.__possible_dict[f.pkg_name].append(f) - for name in self.__possible_dict: - self.__possible_dict[name].sort() - self.__trimdone = True - - def __assign_fmri_ids(self, possible_set): - """ give a set of possible fmris, assign ids""" - - self.__assign_possible(possible_set) - - # assign clause numbers (ids) to possible pkgs - pkgid = 1 - for name in sorted(six.iterkeys(self.__possible_dict)): - for fmri in reversed(self.__possible_dict[name]): - self.__id2fmri[pkgid] = fmri - self.__fmri2id[fmri] = pkgid - pkgid += 1 - - self.__variables = pkgid - 1 - - def __getid(self, fmri): - """Translate fmri to variable number (id)""" - return self.__fmri2id[fmri] - - def __getfmri(self, fid): - """Translate variable number (id) to fmris""" - return self.__id2fmri[fid] - - def __get_fmris_by_version(self, pkg_name): - """Cache for catalog entries; helps performance""" - if pkg_name not in self.__cache: - self.__cache[pkg_name] = [ - t - for t in self.__catalog.fmris_by_version(pkg_name) - ] - return self.__cache[pkg_name] - - def __get_catalog_fmris(self, pkg_name): - """ return the list of fmris in catalog for this pkg name""" - if pkg_name not in self.__pub_trim: - self.__filter_publishers(pkg_name) - - if self.__trimdone: - return self.__possible_dict.get(pkg_name, []) 
- - return [ + remove = matching - set([pfmri]) - eliminated + for f in remove: + self.__addclauses([[-self.__getid(f)]]) + + # prevent the selection of this exact combo; + # permit [] solution + self.__addclauses([[-i for i in solution_vector]]) + + if not self.__iterations: + self.__raise_solution_error(no_solution=True) + + self.__state = SOLVER_SUCCESS + + solution = set([self.__getfmri(i) for i in solution_vector]) + + return solution + + def __get_solution_vector(self): + """Return solution vector from solver""" + return frozenset( + [ + (i + 1) + for i in range(self.__solver.get_variables()) + if self.__solver.dereference(i) + ] + ) + + def __assign_possible(self, possible_set): + """Assign __possible_dict of possible package FMRIs by pkg stem + and mark trimming complete.""" + + # generate dictionary of possible pkgs fmris by pkg stem + self.__possible_dict.clear() + + for f in possible_set: + self.__possible_dict[f.pkg_name].append(f) + for name in self.__possible_dict: + self.__possible_dict[name].sort() + self.__trimdone = True + + def __assign_fmri_ids(self, possible_set): + """give a set of possible fmris, assign ids""" + + self.__assign_possible(possible_set) + + # assign clause numbers (ids) to possible pkgs + pkgid = 1 + for name in sorted(six.iterkeys(self.__possible_dict)): + for fmri in reversed(self.__possible_dict[name]): + self.__id2fmri[pkgid] = fmri + self.__fmri2id[fmri] = pkgid + pkgid += 1 + + self.__variables = pkgid - 1 + + def __getid(self, fmri): + """Translate fmri to variable number (id)""" + return self.__fmri2id[fmri] + + def __getfmri(self, fid): + """Translate variable number (id) to fmris""" + return self.__id2fmri[fid] + + def __get_fmris_by_version(self, pkg_name): + """Cache for catalog entries; helps performance""" + if pkg_name not in self.__cache: + self.__cache[pkg_name] = [ + t for t in self.__catalog.fmris_by_version(pkg_name) + ] + return self.__cache[pkg_name] + + def __get_catalog_fmris(self, pkg_name): + """return the list of fmris in catalog for this pkg name""" + if pkg_name not in self.__pub_trim: + self.__filter_publishers(pkg_name) + + if self.__trimdone: + return self.__possible_dict.get(pkg_name, []) + + return [ + f for tp in self.__get_fmris_by_version(pkg_name) for f in tp[1] + ] + + def __comb_newer_fmris(self, fmri, dotrim=True, obsolete_ok=True): + """Returns tuple of set of fmris that are matched within + CONSTRAINT.NONE of specified version and set of remaining + fmris.""" + + return self.__comb_common( + fmri, dotrim, version.CONSTRAINT_NONE, obsolete_ok + ) + + def __comb_common(self, fmri, dotrim, constraint, obsolete_ok): + """Underlying impl. of other comb routines""" + + self.__progress() + + tp = (fmri, dotrim, constraint, obsolete_ok) # cache index + # determine if the data is cacheable or cached: + if (not self.__trimdone and dotrim) or tp not in self.__cache: + # use frozensets so callers don't inadvertently update + # these sets (which may be cached). 
+ all_fmris = set(self.__get_catalog_fmris(fmri.pkg_name)) + matching = frozenset( + [ f - for tp in self.__get_fmris_by_version(pkg_name) - for f in tp[1] + for f in all_fmris + if not dotrim or not self.__trim_dict.get(f) + if not fmri.version + or fmri.version == f.version + or f.version.is_successor( + fmri.version, constraint=constraint + ) + if obsolete_ok or not self.__fmri_is_obsolete(f) ] - - def __comb_newer_fmris(self, fmri, dotrim=True, obsolete_ok=True): - """Returns tuple of set of fmris that are matched within - CONSTRAINT.NONE of specified version and set of remaining - fmris.""" - - return self.__comb_common(fmri, dotrim, - version.CONSTRAINT_NONE, obsolete_ok) - - def __comb_common(self, fmri, dotrim, constraint, obsolete_ok): - """Underlying impl. of other comb routines""" - - self.__progress() - - tp = (fmri, dotrim, constraint, obsolete_ok) # cache index - # determine if the data is cacheable or cached: - if (not self.__trimdone and dotrim) or tp not in self.__cache: - # use frozensets so callers don't inadvertently update - # these sets (which may be cached). - all_fmris = set(self.__get_catalog_fmris(fmri.pkg_name)) - matching = frozenset([ - f - for f in all_fmris - if not dotrim or not self.__trim_dict.get(f) - if not fmri.version or - fmri.version == f.version or - f.version.is_successor(fmri.version, - constraint=constraint) - if obsolete_ok or not self.__fmri_is_obsolete(f) - ]) - remaining = frozenset(all_fmris - matching) - - # if we haven't finished trimming, don't cache this - if not self.__trimdone: - return matching, remaining - # cache the result - self.__cache[tp] = (matching, remaining) - - return self.__cache[tp] - - def __comb_older_fmris(self, fmri, dotrim=True, obsolete_ok=True): - """Returns tuple of set of fmris that are older than - specified version and set of remaining fmris.""" - newer, older = self.__comb_newer_fmris(fmri, dotrim=False, - obsolete_ok=obsolete_ok) - if not dotrim: - return older, newer - - # we're going to return the older packages, so we need - # to make sure that any trimmed packages are removed - # from the matching set and added to the non-matching - # ones. - trimmed_older = set([ - f - for f in older - if self.__trim_dict.get(f) - ]) - return older - trimmed_older, newer | trimmed_older - - def __comb_auto_fmris(self, fmri, dotrim=True, obsolete_ok=True): - """Returns tuple of set of fmris that are match within - CONSTRAINT.AUTO of specified version and set of remaining - fmris.""" - return self.__comb_common(fmri, dotrim, version.CONSTRAINT_AUTO, - obsolete_ok) - - def __fmri_loadstate(self, fmri, excludes): - """load fmri state (obsolete == True, renamed == True)""" - - try: - relevant = dict([ - (a.attrs["name"], a.attrs["value"]) - for a in self.__catalog.get_entry_actions(fmri, - [catalog.Catalog.DEPENDENCY], excludes=excludes) - if a.name == "set" and \ - a.attrs["name"] in ["pkg.renamed", - "pkg.obsolete"] - ]) - except api_errors.InvalidPackageErrors: - # Trim package entries that have unparseable action data - # so that they can be filtered out later. 
- self.__fmri_state[fmri] = ("false", "false") - self.__trim_unsupported(fmri) - return - - self.__fmri_state[fmri] = ( - relevant.get("pkg.obsolete", "false").lower() == "true", - relevant.get("pkg.renamed", "false").lower() == "true") - - def __fmri_is_obsolete(self, fmri, excludes=EmptyI): - """check to see if fmri is obsolete""" - if fmri not in self.__fmri_state: - self.__fmri_loadstate(fmri, excludes) - return self.__fmri_state[fmri][0] - - def __fmri_is_renamed(self, fmri, excludes=EmptyI): - """check to see if fmri is renamed""" - if fmri not in self.__fmri_state: - self.__fmri_loadstate(fmri, excludes) - return self.__fmri_state[fmri][1] - - def __get_actions(self, fmri, name, excludes=EmptyI, - trim_invalid=True): - """Return list of actions of type 'name' for this 'fmri' in - Catalog.DEPENDENCY section.""" - - try: - return self.__actcache[(fmri, name)] - except KeyError: - pass - - try: - acts = [ - a - for a in self.__catalog.get_entry_actions(fmri, - [catalog.Catalog.DEPENDENCY], excludes=excludes) - if a.name == name - ] - - if name == "depend": - for a in acts: - if a.attrs["type"] in dep_types: - continue - raise api_errors.InvalidPackageErrors([ - "Unknown dependency type {0}". - format(a.attrs["type"])]) - - self.__actcache[(fmri, name)] = acts - return acts - except api_errors.InvalidPackageErrors: - if not trim_invalid: - raise - - # Trim package entries that have unparseable action - # data so that they can be filtered out later. - self.__fmri_state[fmri] = ("false", "false") - self.__trim_unsupported(fmri) - return [] - - def __get_dependency_actions(self, fmri, excludes=EmptyI, - trim_invalid=True): - """Return list of all dependency actions for this fmri.""" - - return self.__get_actions(fmri, "depend", - excludes=excludes, trim_invalid=trim_invalid) - - def __get_set_actions(self, fmri, excludes=EmptyI, - trim_invalid=True): - """Return list of all set actions for this fmri in - Catalog.DEPENDENCY section.""" - - return self.__get_actions(fmri, "set", - excludes=excludes, trim_invalid=trim_invalid) - - def __get_variant_dict(self, fmri): - """Return dictionary of variants suppported by fmri""" - try: - if fmri not in self.__variant_dict: - self.__variant_dict[fmri] = dict( - self.__catalog.get_entry_all_variants(fmri)) - except api_errors.InvalidPackageErrors: - # Trim package entries that have unparseable action data - # so that they can be filtered out later. - self.__variant_dict[fmri] = {} - self.__trim_unsupported(fmri) - return self.__variant_dict[fmri] - - def __is_explicit_install(self, fmri): - """check if given fmri has explicit install actions.""" - - for sa in self.__get_set_actions(fmri): - if sa.attrs["name"] == "pkg.depend.explicit-install" \ - and sa.attrs["value"].lower() == "true": - return True - return False - - def __filter_explicit_install(self, fmri, excludes): - """Check packages which have 'pkg.depend.explicit-install' - action set to true, and prepare to filter.""" - - will_filter = True - # Filter out fmris with 'pkg.depend.explicit-install' set to - # true and not explicitly proposed, already installed in the - # current image, or is parent-constrained and is installed in - # the parent image. 
- if self.__is_explicit_install(fmri): - pkg_name = fmri.pkg_name - if pkg_name in self.__expl_install_dict and \ - fmri in self.__expl_install_dict[pkg_name]: - will_filter = False - elif pkg_name in self.__installed_dict: - will_filter = False - elif pkg_name in self.__parent_dict: - # If this is a linked package that is - # constrained to be the same version as parent, - # and the parent has it installed, ignore - # pkg.depend.explicit-install so that IDR - # versions of packages can be used - # automatically. - will_filter = not any( - da - for da in self.__get_dependency_actions( - fmri, excludes) - if da.attrs["type"] == "parent" and - pkg.actions.depend.DEPEND_SELF in - da.attrlist("fmri") - ) - else: - will_filter = False - return will_filter - - def __generate_dependency_closure(self, fmri_set, excludes=EmptyI, - dotrim=True, full_trim=False, filter_explicit=True, - proposed_dict=None): - """return set of all fmris the set of specified fmris could - depend on; while trimming those packages that cannot be - installed""" - - # Use a copy of the set provided by the caller to prevent - # unexpected modification! - needs_processing = set(fmri_set) - already_processed = set() - - while needs_processing: - self.__progress() - fmri = needs_processing.pop() - already_processed.add(fmri) - # Trim filtered packages. - if filter_explicit and \ - self.__filter_explicit_install(fmri, excludes): - reason = (N_("Uninstalled fmri {0} can " - "only be installed if explicitly " - "requested"), (fmri,)) - self.__trim((fmri,), _TRIM_EXPLICIT_INSTALL, - reason) - continue - - needs_processing |= (self.__generate_dependencies(fmri, - excludes, dotrim, full_trim, - proposed_dict=proposed_dict) - already_processed) - return already_processed - - def __generate_dependencies(self, fmri, excludes=EmptyI, dotrim=True, - full_trim=False, proposed_dict=None): - """return set of direct (possible) dependencies of this pkg; - trim those packages whose dependencies cannot be satisfied""" - try: - return set([ - f - for da in self.__get_dependency_actions(fmri, - excludes) - # check most common ones first; what is checked - # here is a matter of optimization / messaging, not - # correctness. 
- if da.attrs["type"] == "require" or - da.attrs["type"] == "group" or - da.attrs["type"] == "conditional" or - da.attrs["type"] == "require-any" or - da.attrs["type"] == "group-any" or - (full_trim and ( - da.attrs["type"] == "incorporate" or - da.attrs["type"] == "optional" or - da.attrs["type"] == "exclude")) - for f in self.__parse_dependency(da, fmri, - dotrim, check_req=True, - proposed_dict=proposed_dict)[1] - ]) - - except DependencyException as e: - self.__trim((fmri,), e.reason_id, e.reason, - fmri_adds=e.fmris) - return set([]) - - def __elide_possible_renames(self, fmris, excludes=EmptyI): - """Return fmri list (which must be self-complete) with all - renamed fmris that have no other fmris depending on them - removed""" - - # figure out which have been renamed - renamed_fmris = set([ - pfmri - for pfmri in fmris - if self.__fmri_is_renamed(pfmri, excludes) - ]) - - # return if nothing has been renamed - if not renamed_fmris: - return set(fmris) - - fmris_by_name = dict( - (pfmri.pkg_name, pfmri) - for pfmri in fmris + ) + remaining = frozenset(all_fmris - matching) + + # if we haven't finished trimming, don't cache this + if not self.__trimdone: + return matching, remaining + # cache the result + self.__cache[tp] = (matching, remaining) + + return self.__cache[tp] + + def __comb_older_fmris(self, fmri, dotrim=True, obsolete_ok=True): + """Returns tuple of set of fmris that are older than + specified version and set of remaining fmris.""" + newer, older = self.__comb_newer_fmris( + fmri, dotrim=False, obsolete_ok=obsolete_ok + ) + if not dotrim: + return older, newer + + # we're going to return the older packages, so we need + # to make sure that any trimmed packages are removed + # from the matching set and added to the non-matching + # ones. + trimmed_older = set([f for f in older if self.__trim_dict.get(f)]) + return older - trimmed_older, newer | trimmed_older + + def __comb_auto_fmris(self, fmri, dotrim=True, obsolete_ok=True): + """Returns tuple of set of fmris that are match within + CONSTRAINT.AUTO of specified version and set of remaining + fmris.""" + return self.__comb_common( + fmri, dotrim, version.CONSTRAINT_AUTO, obsolete_ok + ) + + def __fmri_loadstate(self, fmri, excludes): + """load fmri state (obsolete == True, renamed == True)""" + + try: + relevant = dict( + [ + (a.attrs["name"], a.attrs["value"]) + for a in self.__catalog.get_entry_actions( + fmri, [catalog.Catalog.DEPENDENCY], excludes=excludes + ) + if a.name == "set" + and a.attrs["name"] in ["pkg.renamed", "pkg.obsolete"] + ] + ) + except api_errors.InvalidPackageErrors: + # Trim package entries that have unparseable action data + # so that they can be filtered out later. 
+ self.__fmri_state[fmri] = ("false", "false") + self.__trim_unsupported(fmri) + return + + self.__fmri_state[fmri] = ( + relevant.get("pkg.obsolete", "false").lower() == "true", + relevant.get("pkg.renamed", "false").lower() == "true", + ) + + def __fmri_is_obsolete(self, fmri, excludes=EmptyI): + """check to see if fmri is obsolete""" + if fmri not in self.__fmri_state: + self.__fmri_loadstate(fmri, excludes) + return self.__fmri_state[fmri][0] + + def __fmri_is_renamed(self, fmri, excludes=EmptyI): + """check to see if fmri is renamed""" + if fmri not in self.__fmri_state: + self.__fmri_loadstate(fmri, excludes) + return self.__fmri_state[fmri][1] + + def __get_actions(self, fmri, name, excludes=EmptyI, trim_invalid=True): + """Return list of actions of type 'name' for this 'fmri' in + Catalog.DEPENDENCY section.""" + + try: + return self.__actcache[(fmri, name)] + except KeyError: + pass + + try: + acts = [ + a + for a in self.__catalog.get_entry_actions( + fmri, [catalog.Catalog.DEPENDENCY], excludes=excludes ) + if a.name == name + ] + + if name == "depend": + for a in acts: + if a.attrs["type"] in dep_types: + continue + raise api_errors.InvalidPackageErrors( + ["Unknown dependency type {0}".format(a.attrs["type"])] + ) - # figure out which renamed fmris have dependencies; compute - # transitively so we can handle multiple renames - - needs_processing = set(fmris) - renamed_fmris - already_processed = set() - - while needs_processing: - pfmri = needs_processing.pop() - already_processed.add(pfmri) - for da in self.__get_dependency_actions( - pfmri, excludes): - if da.attrs["type"] not in \ - ("incorporate", "optional", "origin"): - for f in da.attrlist("fmri"): - try: - tmp = self.__fmridict[f] - except KeyError: - tmp = \ - pkg.fmri.PkgFmri(f) - self.__fmridict[f] = tmp - name = tmp.pkg_name - if name not in fmris_by_name: - continue - new_fmri = fmris_by_name[name] - # since new_fmri will not be - # treated as renamed, make sure - # we check any dependencies it - # has - if new_fmri not in \ - already_processed: - needs_processing.add( - new_fmri) - renamed_fmris.discard(new_fmri) - return set(fmris) - renamed_fmris - - - def __get_dependents(self, pfmri, excludes=EmptyI): - """return set of installed fmris that have require dependencies - on specified installed fmri""" - if self.__dependents is None: - self.__dependents = {} - for f in self.__installed_fmris: - for da in self.__get_dependency_actions(f, - excludes): - if da.attrs["type"] != "require": - continue - pkg_name = pkg.fmri.PkgFmri( - da.attrs["fmri"]).pkg_name - self.__dependents.setdefault( - self.__installed_dict[pkg_name], - set()).add(f) - return self.__dependents.get(pfmri, set()) - - def __trim_recursive_incorps(self, fmri_list, excludes, reason_id): - """trim packages affected by incorporations""" - processed = set() - - work = [fmri_list] - - if reason_id == _TRIM_PROPOSED_INC: - reason = N_( - "Excluded by proposed incorporation '{0}'") - elif reason_id == _TRIM_SYNCED_INC: - reason = N_( - "Excluded by synced parent incorporation '{0}'") - else: - raise AssertionError( - "Invalid reason_id value: {0}".format(reason_id)) - - while work: - fmris = work.pop() - enc_pkg_name = fmris[0].get_name() - # If the package is not installed then any dependenices - # it has are irrelevant. 
- if enc_pkg_name not in self.__installed_dict: - continue - processed.add(frozenset(fmris)) - d = self.__combine_incorps(fmris, excludes) - for name in d: - self.__trim(d[name][1], reason_id, - (reason, (fmris[0].pkg_name,))) - to_do = d[name][0] - if to_do and frozenset(to_do) not in processed: - work.append(list(to_do)) - - def __combine_incorps(self, fmri_list, excludes): - """Given a list of fmris, one of which must be present, produce - a dictionary indexed by package name, which contains a tuple - of two sets (matching fmris, nonmatching)""" - - dict_list = [ - self.__get_incorp_nonmatch_dict(f, excludes) - for f in fmri_list + self.__actcache[(fmri, name)] = acts + return acts + except api_errors.InvalidPackageErrors: + if not trim_invalid: + raise + + # Trim package entries that have unparseable action + # data so that they can be filtered out later. + self.__fmri_state[fmri] = ("false", "false") + self.__trim_unsupported(fmri) + return [] + + def __get_dependency_actions( + self, fmri, excludes=EmptyI, trim_invalid=True + ): + """Return list of all dependency actions for this fmri.""" + + return self.__get_actions( + fmri, "depend", excludes=excludes, trim_invalid=trim_invalid + ) + + def __get_set_actions(self, fmri, excludes=EmptyI, trim_invalid=True): + """Return list of all set actions for this fmri in + Catalog.DEPENDENCY section.""" + + return self.__get_actions( + fmri, "set", excludes=excludes, trim_invalid=trim_invalid + ) + + def __get_variant_dict(self, fmri): + """Return dictionary of variants suppported by fmri""" + try: + if fmri not in self.__variant_dict: + self.__variant_dict[fmri] = dict( + self.__catalog.get_entry_all_variants(fmri) + ) + except api_errors.InvalidPackageErrors: + # Trim package entries that have unparseable action data + # so that they can be filtered out later. + self.__variant_dict[fmri] = {} + self.__trim_unsupported(fmri) + return self.__variant_dict[fmri] + + def __is_explicit_install(self, fmri): + """check if given fmri has explicit install actions.""" + + for sa in self.__get_set_actions(fmri): + if ( + sa.attrs["name"] == "pkg.depend.explicit-install" + and sa.attrs["value"].lower() == "true" + ): + return True + return False + + def __filter_explicit_install(self, fmri, excludes): + """Check packages which have 'pkg.depend.explicit-install' + action set to true, and prepare to filter.""" + + will_filter = True + # Filter out fmris with 'pkg.depend.explicit-install' set to + # true and not explicitly proposed, already installed in the + # current image, or is parent-constrained and is installed in + # the parent image. + if self.__is_explicit_install(fmri): + pkg_name = fmri.pkg_name + if ( + pkg_name in self.__expl_install_dict + and fmri in self.__expl_install_dict[pkg_name] + ): + will_filter = False + elif pkg_name in self.__installed_dict: + will_filter = False + elif pkg_name in self.__parent_dict: + # If this is a linked package that is + # constrained to be the same version as parent, + # and the parent has it installed, ignore + # pkg.depend.explicit-install so that IDR + # versions of packages can be used + # automatically. 
+ will_filter = not any( + da + for da in self.__get_dependency_actions(fmri, excludes) + if da.attrs["type"] == "parent" + and pkg.actions.depend.DEPEND_SELF in da.attrlist("fmri") + ) + else: + will_filter = False + return will_filter + + def __generate_dependency_closure( + self, + fmri_set, + excludes=EmptyI, + dotrim=True, + full_trim=False, + filter_explicit=True, + proposed_dict=None, + ): + """return set of all fmris the set of specified fmris could + depend on; while trimming those packages that cannot be + installed""" + + # Use a copy of the set provided by the caller to prevent + # unexpected modification! + needs_processing = set(fmri_set) + already_processed = set() + + while needs_processing: + self.__progress() + fmri = needs_processing.pop() + already_processed.add(fmri) + # Trim filtered packages. + if filter_explicit and self.__filter_explicit_install( + fmri, excludes + ): + reason = ( + N_( + "Uninstalled fmri {0} can " + "only be installed if explicitly " + "requested" + ), + (fmri,), + ) + self.__trim((fmri,), _TRIM_EXPLICIT_INSTALL, reason) + continue + + needs_processing |= ( + self.__generate_dependencies( + fmri, + excludes, + dotrim, + full_trim, + proposed_dict=proposed_dict, + ) + - already_processed + ) + return already_processed + + def __generate_dependencies( + self, + fmri, + excludes=EmptyI, + dotrim=True, + full_trim=False, + proposed_dict=None, + ): + """return set of direct (possible) dependencies of this pkg; + trim those packages whose dependencies cannot be satisfied""" + try: + return set( + [ + f + for da in self.__get_dependency_actions(fmri, excludes) + # check most common ones first; what is checked + # here is a matter of optimization / messaging, not + # correctness. + if da.attrs["type"] == "require" + or da.attrs["type"] == "group" + or da.attrs["type"] == "conditional" + or da.attrs["type"] == "require-any" + or da.attrs["type"] == "group-any" + or ( + full_trim + and ( + da.attrs["type"] == "incorporate" + or da.attrs["type"] == "optional" + or da.attrs["type"] == "exclude" + ) + ) + for f in self.__parse_dependency( + da, + fmri, + dotrim, + check_req=True, + proposed_dict=proposed_dict, + )[1] ] - # The following ignores constraints that appear in only some of - # the versions. This also handles obsoletions & renames. 
- all_keys = reduce(set.intersection, - (set(d.keys()) for d in dict_list)) - - return dict( - (k, - (reduce(set.union, - (d.get(k, (set(), set()))[0] - for d in dict_list)), - reduce(set.intersection, - (d.get(k, (set(), set()))[1] - for d in dict_list)))) - for k in all_keys + ) + + except DependencyException as e: + self.__trim((fmri,), e.reason_id, e.reason, fmri_adds=e.fmris) + return set([]) + + def __elide_possible_renames(self, fmris, excludes=EmptyI): + """Return fmri list (which must be self-complete) with all + renamed fmris that have no other fmris depending on them + removed""" + + # figure out which have been renamed + renamed_fmris = set( + [ + pfmri + for pfmri in fmris + if self.__fmri_is_renamed(pfmri, excludes) + ] + ) + + # return if nothing has been renamed + if not renamed_fmris: + return set(fmris) + + fmris_by_name = dict((pfmri.pkg_name, pfmri) for pfmri in fmris) + + # figure out which renamed fmris have dependencies; compute + # transitively so we can handle multiple renames + + needs_processing = set(fmris) - renamed_fmris + already_processed = set() + + while needs_processing: + pfmri = needs_processing.pop() + already_processed.add(pfmri) + for da in self.__get_dependency_actions(pfmri, excludes): + if da.attrs["type"] not in ( + "incorporate", + "optional", + "origin", + ): + for f in da.attrlist("fmri"): + try: + tmp = self.__fmridict[f] + except KeyError: + tmp = pkg.fmri.PkgFmri(f) + self.__fmridict[f] = tmp + name = tmp.pkg_name + if name not in fmris_by_name: + continue + new_fmri = fmris_by_name[name] + # since new_fmri will not be + # treated as renamed, make sure + # we check any dependencies it + # has + if new_fmri not in already_processed: + needs_processing.add(new_fmri) + renamed_fmris.discard(new_fmri) + return set(fmris) - renamed_fmris + + def __get_dependents(self, pfmri, excludes=EmptyI): + """return set of installed fmris that have require dependencies + on specified installed fmri""" + if self.__dependents is None: + self.__dependents = {} + for f in self.__installed_fmris: + for da in self.__get_dependency_actions(f, excludes): + if da.attrs["type"] != "require": + continue + pkg_name = pkg.fmri.PkgFmri(da.attrs["fmri"]).pkg_name + self.__dependents.setdefault( + self.__installed_dict[pkg_name], set() + ).add(f) + return self.__dependents.get(pfmri, set()) + + def __trim_recursive_incorps(self, fmri_list, excludes, reason_id): + """trim packages affected by incorporations""" + processed = set() + + work = [fmri_list] + + if reason_id == _TRIM_PROPOSED_INC: + reason = N_("Excluded by proposed incorporation '{0}'") + elif reason_id == _TRIM_SYNCED_INC: + reason = N_("Excluded by synced parent incorporation '{0}'") + else: + raise AssertionError( + "Invalid reason_id value: {0}".format(reason_id) + ) + + while work: + fmris = work.pop() + enc_pkg_name = fmris[0].get_name() + # If the package is not installed then any dependenices + # it has are irrelevant. + if enc_pkg_name not in self.__installed_dict: + continue + processed.add(frozenset(fmris)) + d = self.__combine_incorps(fmris, excludes) + for name in d: + self.__trim( + d[name][1], reason_id, (reason, (fmris[0].pkg_name,)) ) - - - def __get_incorp_nonmatch_dict(self, fmri, excludes): - """Given a fmri with incorporation dependencies, produce a - dictionary containing (matching, non matching fmris), - indexed by pkg name. 
Note that some fmris may be - incorporated more than once at different levels of - specificity""" - ret = dict() - for da in self.__get_dependency_actions(fmri, - excludes=excludes): - if da.attrs["type"] != "incorporate": - continue - nm, m, _c, _d, _r, f = self.__parse_dependency(da, fmri, - dotrim=False) - # Collect all incorp. dependencies affecting - # a package in a list. Note that it is - # possible for both matching and non-matching - # sets to be NULL, and we'll need at least - # one item in the list for reduce to work. - ret.setdefault(f.pkg_name, (list(), list())) - ret[f.pkg_name][0].append(set(m)) - ret[f.pkg_name][1].append(set(nm)) - - # For each of the packages constrained, combine multiple - # incorporation dependencies. Matches are intersected, - # non-matches form a union. - for pkg_name in ret: - ret[pkg_name] = ( - reduce(set.intersection, ret[pkg_name][0]), - reduce(set.union, ret[pkg_name][1])) - return ret - - def __parse_group_dependency(self, dotrim, obsolete_ok, fmris): - """Returns (matching, nonmatching) fmris for given list of group - dependencies.""" - + to_do = d[name][0] + if to_do and frozenset(to_do) not in processed: + work.append(list(to_do)) + + def __combine_incorps(self, fmri_list, excludes): + """Given a list of fmris, one of which must be present, produce + a dictionary indexed by package name, which contains a tuple + of two sets (matching fmris, nonmatching)""" + + dict_list = [ + self.__get_incorp_nonmatch_dict(f, excludes) for f in fmri_list + ] + # The following ignores constraints that appear in only some of + # the versions. This also handles obsoletions & renames. + all_keys = reduce(set.intersection, (set(d.keys()) for d in dict_list)) + + return dict( + ( + k, + ( + reduce( + set.union, + (d.get(k, (set(), set()))[0] for d in dict_list), + ), + reduce( + set.intersection, + (d.get(k, (set(), set()))[1] for d in dict_list), + ), + ), + ) + for k in all_keys + ) + + def __get_incorp_nonmatch_dict(self, fmri, excludes): + """Given a fmri with incorporation dependencies, produce a + dictionary containing (matching, non matching fmris), + indexed by pkg name. Note that some fmris may be + incorporated more than once at different levels of + specificity""" + ret = dict() + for da in self.__get_dependency_actions(fmri, excludes=excludes): + if da.attrs["type"] != "incorporate": + continue + nm, m, _c, _d, _r, f = self.__parse_dependency( + da, fmri, dotrim=False + ) + # Collect all incorp. dependencies affecting + # a package in a list. Note that it is + # possible for both matching and non-matching + # sets to be NULL, and we'll need at least + # one item in the list for reduce to work. + ret.setdefault(f.pkg_name, (list(), list())) + ret[f.pkg_name][0].append(set(m)) + ret[f.pkg_name][1].append(set(nm)) + + # For each of the packages constrained, combine multiple + # incorporation dependencies. Matches are intersected, + # non-matches form a union. 
+ for pkg_name in ret: + ret[pkg_name] = ( + reduce(set.intersection, ret[pkg_name][0]), + reduce(set.union, ret[pkg_name][1]), + ) + return ret + + def __parse_group_dependency(self, dotrim, obsolete_ok, fmris): + """Returns (matching, nonmatching) fmris for given list of group + dependencies.""" + + matching = [] + nonmatching = [] + for f in fmris: + # remove version explicitly; don't + # modify cached fmri + if f.version is not None: + fmri = f.copy() + fmri.version = None + else: + fmri = f + + m, nm = self.__comb_newer_fmris( + fmri, dotrim, obsolete_ok=obsolete_ok + ) + matching.extend(m) + nonmatching.extend(nm) + + return frozenset(matching), frozenset(nonmatching) + + def __parse_dependency( + self, + dependency_action, + source, + dotrim=True, + check_req=False, + proposed_dict=None, + ): + """Return tuple of (disallowed fmri list, allowed fmri list, + conditional_list, dependency_type, required)""" + + dtype = dependency_action.attrs["type"] + fmris = [] + for fmristr in dependency_action.attrlist("fmri"): + try: + fmri = self.__fmridict[fmristr] + except KeyError: + fmri = pkg.fmri.PkgFmri(fmristr) + self.__fmridict[fmristr] = fmri + + if not self.__depend_ts: + fver = fmri.version + if fver and fver.timestr: + # Include timestamp in all error + # output for dependencies. + self.__depend_ts = True + + fmris.append(fmri) + + fmri = fmris[0] + + # true if match is required for containing pkg + required = True + # if this dependency has conditional fmris + conditional = None + # true if obsolete pkgs satisfy this dependency + obsolete_ok = False + + if dtype == "require": + matching, nonmatching = self.__comb_newer_fmris( + fmri, dotrim, obsolete_ok=obsolete_ok + ) + + elif dtype == "optional": + obsolete_ok = True + matching, nonmatching = self.__comb_newer_fmris( + fmri, dotrim, obsolete_ok=obsolete_ok + ) + if fmri.pkg_name not in self.__req_pkg_names: + required = False + + elif dtype == "exclude": + obsolete_ok = True + matching, nonmatching = self.__comb_older_fmris( + fmri, dotrim, obsolete_ok=obsolete_ok + ) + if fmri.pkg_name not in self.__req_pkg_names: + required = False + + elif dtype == "incorporate": + obsolete_ok = True + matching, nonmatching = self.__comb_auto_fmris( + fmri, dotrim, obsolete_ok=obsolete_ok + ) + if fmri.pkg_name not in self.__req_pkg_names: + required = False + # Track packages that deliver incorporate deps. + self.__known_incs.add(source.pkg_name) + + elif dtype == "conditional": + cond_fmri = pkg.fmri.PkgFmri(dependency_action.attrs["predicate"]) + conditional, nonmatching = self.__comb_newer_fmris( + cond_fmri, dotrim, obsolete_ok=obsolete_ok + ) + + # Required is only really helpful for solver error + # messaging. The only time we know that this dependency + # is required is when the predicate package must be part + # of the solution. + if cond_fmri.pkg_name not in self.__req_pkg_names: + required = False + + proposed = ( + proposed_dict[cond_fmri.pkg_name] + if proposed_dict and cond_fmri.pkg_name in proposed_dict + else [] + ) + + # If the predicate is not installed and not in the + # proposed set, then the dependant package is not + # required. 
+ installed = False + for f in conditional: + if ( + f in proposed + or f in self.__installed_fmris - self.__removal_fmris + ): + installed = True + if not installed: + required = False + + matching, nonmatching = self.__comb_newer_fmris( + fmri, dotrim, obsolete_ok=obsolete_ok + ) + + elif dtype == "require-any": + matching = [] + nonmatching = [] + for f in fmris: + m, nm = self.__comb_newer_fmris( + f, dotrim, obsolete_ok=obsolete_ok + ) + matching.extend(m) + nonmatching.extend(nm) + + matching = set(matching) + nonmatching = set(nonmatching) + + elif dtype == "parent": + # Parent dependency fmris must exist outside of the + # current image, so we don't report any new matching + # or nonmatching requirements for the solver. + matching = nonmatching = frozenset() + required = False + + elif dtype == "origin": + matching, nonmatching = self.__comb_newer_fmris( + fmri, dotrim=False, obsolete_ok=obsolete_ok + ) + required = False + + elif dtype == "group" or dtype == "group-any": + obsolete_ok = True + # Determine potential fmris for matching. + potential = [ + fmri + for fmri in fmris + if not ( + fmri.pkg_name in self.__avoid_set + or fmri.pkg_name in self.__reject_set + ) + ] + required = len(potential) > 0 + + # Determine matching fmris. + matching = nonmatching = frozenset() + if required: + matching, nonmatching = self.__parse_group_dependency( + dotrim, obsolete_ok, potential + ) + if not matching and not nonmatching: + # No possible stems at all? Ignore + # dependency. + required = False + + # If more than one stem matched, prefer stems for which + # no obsoletion exists. + mstems = frozenset(f.pkg_name for f in matching) + if required and len(mstems) > 1: + ostems = set() + ofmris = set() + for f in matching: + if self.__fmri_is_obsolete(f): + ostems.add(f.pkg_name) + ofmris.add(f) + + # If not all matching stems had an obsolete + # version, remove the obsolete fmris from + # consideration. This makes the assumption that + # at least one of the remaining, non-obsolete + # stems will be installable. If that is not + # true, the solver may not find anything to do, + # or may not find a solution if the system is + # overly constrained. This is believed + # unlikely, so seems a reasonable compromise. + # In that scenario, a client can move forward by + # using --reject to remove the related group + # dependencies. + if mstems - ostems: + matching -= ofmris + nonmatching |= ofmris + + else: # only way this happens is if new type is incomplete + raise api_errors.InvalidPackageErrors( + ["Unknown dependency type {0}".format(dtype)] + ) + + # check if we're throwing exceptions and we didn't find any + # matches on a required package + if not check_req or matching or not required: + return (nonmatching, matching, conditional, dtype, required, fmri) + elif dotrim and source in self.__inc_list and dtype == "incorporate": + # This is an incorporation package that will not be + # removed, so if dependencies can't be satisfied, try + # again with dotrim=False to ignore rejections due to + # proposed packages. + return self.__parse_dependency( + dependency_action, + source, + dotrim=False, + check_req=check_req, + proposed_dict=proposed_dict, + ) + + # Neither build or publisher is interesting for dependencies. + fstr = fmri.get_fmri( + anarchy=True, include_build=False, include_scheme=False + ) + + # we're going to toss an exception + if dtype == "exclude": + # If we reach this point, we know that a required + # package (already installed or proposed) was excluded. 
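For group dependencies that match more than one stem, the code above prefers stems with no obsolete candidate and moves the obsolete FMRIs to the rejected side. A small sketch of that preference, with candidates modelled as (stem, version) pairs (hypothetical helper, not part of the patch):

def prefer_non_obsolete(matching, obsolete):
    """matching: set of (stem, version) candidates for a group dependency.
    obsolete:   subset of those candidates that are marked obsolete.
    Applied only when more than one stem matched: if at least one stem
    has no obsolete candidate, drop the obsolete candidates."""
    mstems = {stem for stem, _ in matching}
    ostems = {stem for stem, _ in matching & obsolete}
    if mstems - ostems:
        return matching - obsolete, matching & obsolete
    return matching, set()

kept, dropped = prefer_non_obsolete(
    {("web/browser", "1.0"), ("web/browser-legacy", "0.9")},
    {("web/browser-legacy", "0.9")},
)
assert kept == {("web/browser", "1.0")}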
+ matching, nonmatching = self.__comb_older_fmris( + fmri, dotrim=False, obsolete_ok=False + ) + + # Determine if excluded package is already installed. + installed = False + for f in nonmatching: + if f in self.__installed_fmris: + installed = True + break + + if not matching and installed: + # The exclude dependency doesn't allow the + # version of the package that is already + # installed. + raise DependencyException( + _TRIM_INSTALLED_EXCLUDE, + ( + N_( + "Package contains 'exclude' dependency " + "{0} on installed package" + ), + (fstr,), + ), + ) + elif not matching and not installed: + # The exclude dependency doesn't allow any + # version of the package that is proposed. + raise DependencyException( + _TRIM_INSTALLED_EXCLUDE, + ( + N_( + "Package contains 'exclude' dependency " + "{0} on proposed package" + ), + (fstr,), + ), + ) + else: + # All versions of the package allowed by the + # exclude dependency were trimmed by other + # dependencies. If changed, update _fmri_errors + # _TRIM_DEP_TRIMMED. + raise DependencyException( + _TRIM_DEP_TRIMMED, + ( + N_( + "No version allowed by 'exclude' " + "dependency {0} could be installed" + ), + (fstr,), + ), + matching, + ) + # not reached + elif dtype == "incorporate": + matching, nonmatching = self.__comb_auto_fmris( + fmri, dotrim=False, obsolete_ok=obsolete_ok + ) + + # check if allowing obsolete packages helps + + elif not obsolete_ok: + # see if allowing obsolete pkgs gets us some matches + if len(fmris) == 1: + matching, nonmatching = self.__comb_newer_fmris( + fmri, dotrim, obsolete_ok=True + ) + else: matching = [] nonmatching = [] for f in fmris: - # remove version explicitly; don't - # modify cached fmri - if f.version is not None: - fmri = f.copy() - fmri.version = None - else: - fmri = f - - m, nm = self.__comb_newer_fmris(fmri, - dotrim, obsolete_ok=obsolete_ok) - matching.extend(m) - nonmatching.extend(nm) - - return frozenset(matching), frozenset(nonmatching) - - def __parse_dependency(self, dependency_action, source, - dotrim=True, check_req=False, proposed_dict=None): - """Return tuple of (disallowed fmri list, allowed fmri list, - conditional_list, dependency_type, required)""" - - dtype = dependency_action.attrs["type"] - fmris = [] - for fmristr in dependency_action.attrlist("fmri"): - try: - fmri = self.__fmridict[fmristr] - except KeyError: - fmri = pkg.fmri.PkgFmri(fmristr) - self.__fmridict[fmristr] = fmri - - if not self.__depend_ts: - fver = fmri.version - if fver and fver.timestr: - # Include timestamp in all error - # output for dependencies. 
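Rejection reasons throughout this file are carried either as a bare N_() string or as a (format-string, args) tuple and are only rendered when an error is actually shown to the user. A minimal sketch of that deferred rendering, with gettext replaced by an identity function:

def _(s):
    # Stand-in for gettext; N_() in the real code only marks strings
    # for extraction without translating them immediately.
    return s

def render_reason(reason):
    """Render a trim/dependency reason, which is either a plain string
    or a (format_string, args) tuple, at display time."""
    if isinstance(reason, tuple):
        fmt, args = reason
        return _(fmt).format(*args)
    return _(reason)

reason = (
    "No version for '{0}' dependency on {1} can be found",
    ("require", "pkg:/library/foo"),
)
print(render_reason(reason))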
- self.__depend_ts = True - - fmris.append(fmri) - - fmri = fmris[0] - - # true if match is required for containing pkg - required = True - # if this dependency has conditional fmris - conditional = None - # true if obsolete pkgs satisfy this dependency - obsolete_ok = False - - if dtype == "require": - matching, nonmatching = \ - self.__comb_newer_fmris(fmri, dotrim, - obsolete_ok=obsolete_ok) - - elif dtype == "optional": - obsolete_ok = True - matching, nonmatching = \ - self.__comb_newer_fmris(fmri, dotrim, - obsolete_ok=obsolete_ok) - if fmri.pkg_name not in self.__req_pkg_names: - required = False - - elif dtype == "exclude": - obsolete_ok = True - matching, nonmatching = \ - self.__comb_older_fmris(fmri, dotrim, - obsolete_ok=obsolete_ok) - if fmri.pkg_name not in self.__req_pkg_names: - required = False - - elif dtype == "incorporate": - obsolete_ok = True - matching, nonmatching = \ - self.__comb_auto_fmris(fmri, dotrim, - obsolete_ok=obsolete_ok) - if fmri.pkg_name not in self.__req_pkg_names: - required = False - # Track packages that deliver incorporate deps. - self.__known_incs.add(source.pkg_name) - - elif dtype == "conditional": - cond_fmri = pkg.fmri.PkgFmri( - dependency_action.attrs["predicate"]) - conditional, nonmatching = self.__comb_newer_fmris( - cond_fmri, dotrim, obsolete_ok=obsolete_ok) - - # Required is only really helpful for solver error - # messaging. The only time we know that this dependency - # is required is when the predicate package must be part - # of the solution. - if cond_fmri.pkg_name not in self.__req_pkg_names: - required = False - - proposed = ( - proposed_dict[cond_fmri.pkg_name] - if proposed_dict and - cond_fmri.pkg_name in proposed_dict - else [] - ) - - # If the predicate is not installed and not in the - # proposed set, then the dependant package is not - # required. - installed = False - for f in conditional: - if (f in proposed or - f in self.__installed_fmris - - self.__removal_fmris): - installed = True - if not installed: - required = False - - matching, nonmatching = \ - self.__comb_newer_fmris(fmri, dotrim, - obsolete_ok=obsolete_ok) - - elif dtype == "require-any": - matching = [] - nonmatching = [] - for f in fmris: - m, nm = self.__comb_newer_fmris(f, dotrim, - obsolete_ok=obsolete_ok) - matching.extend(m) - nonmatching.extend(nm) - - matching = set(matching) - nonmatching = set(nonmatching) - - elif dtype == "parent": - # Parent dependency fmris must exist outside of the - # current image, so we don't report any new matching - # or nonmatching requirements for the solver. - matching = nonmatching = frozenset() - required = False - - elif dtype == "origin": - matching, nonmatching = \ - self.__comb_newer_fmris(fmri, dotrim=False, - obsolete_ok=obsolete_ok) - required = False - - elif dtype == "group" or dtype == "group-any": - obsolete_ok = True - # Determine potential fmris for matching. - potential = [ - fmri - for fmri in fmris - if not (fmri.pkg_name in self.__avoid_set or - fmri.pkg_name in self.__reject_set) + m, nm = self.__comb_newer_fmris(f, dotrim, obsolete_ok=True) + matching.extend(m) + nonmatching.extend(nm) + if matching: + if len(fmris) == 1: + raise DependencyException( + _TRIM_DEP_OBSOLETE, + ( + N_( + "All acceptable versions of " + "'{0}' dependency on {1} are " + "obsolete" + ), + (dtype, fstr), + ), + ) + else: + sfmris = frozenset( + [ + fmri.get_fmri( + anarchy=True, + include_build=False, + include_scheme=False, + ) + for f in fmris ] - required = len(potential) > 0 - - # Determine matching fmris. 
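The various __comb_*_fmris helpers used throughout return a (matching, nonmatching) partition of the known versions of a stem against a constraint. A simplified stand-alone version for "this version or newer" matching; the real helpers also honour trims, obsoletions and publishers:

def comb_newer(candidates, minimum):
    """Split candidate versions of one package into those that satisfy
    a 'this version or newer' constraint and those that do not.
    Versions are modelled as orderable tuples for brevity."""
    matching = {v for v in candidates if v >= minimum}
    return matching, set(candidates) - matching

m, nm = comb_newer({(1, 0), (1, 1), (2, 0)}, minimum=(1, 1))
assert m == {(1, 1), (2, 0)} and nm == {(1, 0)}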
- matching = nonmatching = frozenset() - if required: - matching, nonmatching = \ - self.__parse_group_dependency(dotrim, - obsolete_ok, potential) - if not matching and not nonmatching: - # No possible stems at all? Ignore - # dependency. - required = False - - # If more than one stem matched, prefer stems for which - # no obsoletion exists. - mstems = frozenset(f.pkg_name for f in matching) - if required and len(mstems) > 1: - ostems = set() - ofmris = set() - for f in matching: - if self.__fmri_is_obsolete(f): - ostems.add(f.pkg_name) - ofmris.add(f) - - # If not all matching stems had an obsolete - # version, remove the obsolete fmris from - # consideration. This makes the assumption that - # at least one of the remaining, non-obsolete - # stems will be installable. If that is not - # true, the solver may not find anything to do, - # or may not find a solution if the system is - # overly constrained. This is believed - # unlikely, so seems a reasonable compromise. - # In that scenario, a client can move forward by - # using --reject to remove the related group - # dependencies. - if mstems - ostems: - matching -= ofmris - nonmatching |= ofmris - - else: # only way this happens is if new type is incomplete - raise api_errors.InvalidPackageErrors([ - "Unknown dependency type {0}".format(dtype)]) - - # check if we're throwing exceptions and we didn't find any - # matches on a required package - if not check_req or matching or not required: - return (nonmatching, matching, conditional, dtype, - required, fmri) - elif dotrim and source in self.__inc_list and \ - dtype == "incorporate": - # This is an incorporation package that will not be - # removed, so if dependencies can't be satisfied, try - # again with dotrim=False to ignore rejections due to - # proposed packages. - return self.__parse_dependency(dependency_action, - source, dotrim=False, check_req=check_req, - proposed_dict=proposed_dict) - - # Neither build or publisher is interesting for dependencies. - fstr = fmri.get_fmri(anarchy=True, include_build=False, - include_scheme=False) - - # we're going to toss an exception - if dtype == "exclude": - # If we reach this point, we know that a required - # package (already installed or proposed) was excluded. - matching, nonmatching = self.__comb_older_fmris( - fmri, dotrim=False, obsolete_ok=False) - - # Determine if excluded package is already installed. - installed = False - for f in nonmatching: - if f in self.__installed_fmris: - installed = True - break - - if not matching and installed: - # The exclude dependency doesn't allow the - # version of the package that is already - # installed. - raise DependencyException( - _TRIM_INSTALLED_EXCLUDE, - (N_("Package contains 'exclude' dependency " - "{0} on installed package"), (fstr,))) - elif not matching and not installed: - # The exclude dependency doesn't allow any - # version of the package that is proposed. - raise DependencyException( - _TRIM_INSTALLED_EXCLUDE, - (N_("Package contains 'exclude' dependency " - "{0} on proposed package"), (fstr,))) - else: - # All versions of the package allowed by the - # exclude dependency were trimmed by other - # dependencies. If changed, update _fmri_errors - # _TRIM_DEP_TRIMMED. 
- raise DependencyException( - _TRIM_DEP_TRIMMED, - (N_("No version allowed by 'exclude' " - "dependency {0} could be installed"), - (fstr,)), matching) - # not reached - elif dtype == "incorporate": - matching, nonmatching = \ - self.__comb_auto_fmris(fmri, dotrim=False, - obsolete_ok=obsolete_ok) - - # check if allowing obsolete packages helps - - elif not obsolete_ok: - # see if allowing obsolete pkgs gets us some matches - if len(fmris) == 1: - matching, nonmatching = \ - self.__comb_newer_fmris(fmri, dotrim, - obsolete_ok=True) - else: - matching = [] - nonmatching = [] - for f in fmris: - m, nm = self.__comb_newer_fmris(f, - dotrim, obsolete_ok=True) - matching.extend(m) - nonmatching.extend(nm) - if matching: - if len(fmris) == 1: - raise DependencyException( - _TRIM_DEP_OBSOLETE, - (N_("All acceptable versions of " - "'{0}' dependency on {1} are " - "obsolete"), (dtype, fstr))) - else: - sfmris = frozenset([ - fmri.get_fmri(anarchy=True, - include_build=False, - include_scheme=False) - for f in fmris - ]) - raise DependencyException( - _TRIM_DEP_OBSOLETE, - (N_("All acceptable versions of " - "'{0}' dependencies on {1} are " - "obsolete"), (dtype, sfmris))) - # something else is wrong - matching, nonmatching = self.__comb_newer_fmris(fmri, - dotrim=False, obsolete_ok=obsolete_ok) + ) + raise DependencyException( + _TRIM_DEP_OBSOLETE, + ( + N_( + "All acceptable versions of " + "'{0}' dependencies on {1} are " + "obsolete" + ), + (dtype, sfmris), + ), + ) + # something else is wrong + matching, nonmatching = self.__comb_newer_fmris( + fmri, dotrim=False, obsolete_ok=obsolete_ok + ) + else: + # try w/o trimming anything + matching, nonmatching = self.__comb_newer_fmris( + fmri, dotrim=False, obsolete_ok=obsolete_ok + ) + + if not matching: + raise DependencyException( + _TRIM_DEP_MISSING, + ( + N_( + "No version for '{0}' dependency on {1} can " "be found" + ), + (dtype, fstr), + ), + ) + + # If this is a dependency of a proposed package for which only + # one version is possible, then mark all other versions as + # rejected by this package. This ensures that other proposed + # packages will be included in error messaging if their + # dependencies can only be satisfied if this one is not + # proposed. + if ( + dotrim + and nonmatching + and proposed_dict + and proposed_dict.get(source.pkg_name, []) == [source] + ): + nm = self.__parse_dependency( + dependency_action, + source, + dotrim=False, + check_req=check_req, + proposed_dict=proposed_dict, + )[0] + self.__trim( + nm, + _TRIM_DEP_TRIMMED, + ( + N_( + "Rejected by '{0}' dependency in proposed " + "package '{1}'" + ), + (dtype, source.pkg_name), + ), + fmri_adds=[source], + ) + + # If changed, update _fmri_errors _TRIM_DEP_TRIMMED. + raise DependencyException( + _TRIM_DEP_TRIMMED, + ( + N_( + "No version matching '{0}' dependency {1} can be " + "installed" + ), + (dtype, fstr), + ), + matching, + ) + + def __installed_unsatisfied_parent_deps( + self, excludes, ignore_inst_parent_deps + ): + """If we're a child image then we need to relax packages + that are dependent upon themselves in the parent image. This + is necessary to keep those packages in sync.""" + + relax_pkgs = set() + + # check if we're a child image. + if not self.__is_child(): + return relax_pkgs + + # if we're ignoring parent dependencies there is no reason to + # relax install-holds in packages constrained by those + # dependencies. 
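The parent-dependency relaxation sketched here can be reduced to: for every installed package whose manifest carries a parent dependency on itself (pkg.actions.depend.DEPEND_SELF in the real code), relax its stem when the parent image holds a different version. A hypothetical, simplified model using plain dictionaries:

def stems_to_relax(installed, self_parent_dep_stems, parent_versions):
    """installed:             {stem: version} in the child image.
    self_parent_dep_stems:    stems whose manifests carry a parent
                              dependency on themselves.
    parent_versions:          {stem: version} installed in the parent.
    Returns the stems whose install-holds must be relaxed so the child
    image can be brought back in sync with its parent."""
    relax = set()
    for stem, ver in installed.items():
        if stem not in self_parent_dep_stems:
            continue
        if parent_versions.get(stem) != ver:
            relax.add(stem)
    return relax

assert stems_to_relax(
    {"system/kernel": "1.0"}, {"system/kernel"}, {"system/kernel": "1.1"}
) == {"system/kernel"}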
+ if ignore_inst_parent_deps: + return relax_pkgs + + for f in self.__installed_fmris: + for da in self.__get_dependency_actions(f, excludes): + if da.attrs["type"] != "parent": + continue + self.__linked_pkgs.add(f) + + if pkg.actions.depend.DEPEND_SELF not in da.attrlist("fmri"): + continue + + # We intentionally do not rely on 'insync' state + # as a change in facets/variants may result in + # changed parent constraints. + pf = self.__parent_dict.get(f.pkg_name) + if pf != f: + # We only need to relax packages that + # don't match the parent. + relax_pkgs.add(f.pkg_name) + break + + return relax_pkgs + + def __generate_dependency_errors(self, fmri_list, excludes=EmptyI): + """Returns a list of strings describing why fmris cannot + be installed, or returns an empty list if installation + is possible.""" + ret = [] + + needs_processing = set(fmri_list) + already_processed = set() + already_seen = set() + + while needs_processing: + fmri = needs_processing.pop() + errors, newfmris = self.__do_error_work( + fmri, excludes, already_seen + ) + ret.extend(errors) + already_processed.add(fmri) + needs_processing |= newfmris - already_processed + return ret + + def get_trim_errors(self): + """Returns a list of strings for all FMRIs evaluated by the + solver explaining why they were rejected. (All packages + found in solver's trim database.)""" + + # At a minimum, a solve_*() method must have been called first. + assert self.__state != SOLVER_INIT + # Value 'DebugValues' is unsubscriptable; + # pylint: disable=E1136 + assert DebugValues["plan"] + + return self.__fmri_list_errors( + six.iterkeys(self.__trim_dict), already_seen=set(), verbose=True + ) + + def __check_installed(self): + """Generate list of strings describing why currently + installed packages cannot be installed, or empty list""" + + # Used to de-dup errors. + already_seen = set() + + ret = [] + for f in self.__installed_fmris - self.__removal_fmris: + matching = self.__comb_newer_fmris( + f, dotrim=True, obsolete_ok=True + )[0] + if matching: + continue + # no matches when disallowed packages are excluded + matching = self.__comb_newer_fmris( + f, dotrim=False, obsolete_ok=True + )[0] + + ret.append( + _( + "No suitable version of installed package " "{0} found" + ).format(f.pkg_name) + ) + ret.extend( + self.__fmri_list_errors(matching, already_seen=already_seen) + ) + + return ret + + def __fmri_list_errors( + self, fmri_list, indent="", already_seen=None, omit=None, verbose=False + ): + """Given a list of FMRIs, return indented strings indicating why + they were rejected.""" + ret = [] + + if omit is None: + omit = set() + + fmri_reasons = [] + skey = operator.attrgetter("pkg_name") + for f in sorted(fmri_list, key=skey): + res = self.__fmri_errors( + f, indent, already_seen=already_seen, omit=omit, verbose=verbose + ) + # If None was returned, that implies that all of the + # reasons the FMRI was rejected aren't interesting. + if res is not None: + fmri_reasons.append(res) + + last_run = [] + + def collapse_fmris(): + """Collapse a range of FMRIs into format: + + first_fmri + to + last_fmri + + ...based on verbose state.""" + + if last_run: + indent = last_run.pop(0) + if verbose or len(last_run) <= 1: + ret.extend(last_run) + elif not self.__depend_ts and ret[-1].endswith( + last_run[-1].strip() + ): + # If timestamps are not being displayed + # and the last FMRI is the same as the + # first in the range then we only need + # to show the first. 
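__generate_dependency_errors walks the dependency graph with a simple worklist so each package is diagnosed exactly once. A generic sketch of that needs_processing/already_processed pattern (the diagnose callback is hypothetical):

def walk_errors(roots, diagnose):
    """diagnose(pkg) -> (list_of_error_strings, set_of_related_pkgs).
    Accumulates errors without revisiting packages, mirroring the
    worklist loop above."""
    errors = []
    pending = set(roots)
    done = set()
    while pending:
        pkg = pending.pop()
        errs, related = diagnose(pkg)
        errors.extend(errs)
        done.add(pkg)
        pending |= related - done
    return errors

# Toy graph: A depends on B and C, C depends on B.
graph = {"A": {"B", "C"}, "B": set(), "C": {"B"}}
print(walk_errors({"A"}, lambda p: ([f"checked {p}"], graph[p])))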
+ pass else: - # try w/o trimming anything - matching, nonmatching = self.__comb_newer_fmris(fmri, - dotrim=False, obsolete_ok=obsolete_ok) - - if not matching: - raise DependencyException(_TRIM_DEP_MISSING, - (N_("No version for '{0}' dependency on {1} can " - "be found"), (dtype, fstr))) - - # If this is a dependency of a proposed package for which only - # one version is possible, then mark all other versions as - # rejected by this package. This ensures that other proposed - # packages will be included in error messaging if their - # dependencies can only be satisfied if this one is not - # proposed. - if dotrim and nonmatching and proposed_dict and \ - proposed_dict.get(source.pkg_name, []) == [source]: - nm = self.__parse_dependency(dependency_action, source, - dotrim=False, check_req=check_req, - proposed_dict=proposed_dict)[0] - self.__trim(nm, _TRIM_DEP_TRIMMED, - (N_("Rejected by '{0}' dependency in proposed " - "package '{1}'"), (dtype, source.pkg_name)), - fmri_adds=[source]) - - # If changed, update _fmri_errors _TRIM_DEP_TRIMMED. - raise DependencyException(_TRIM_DEP_TRIMMED, - (N_("No version matching '{0}' dependency {1} can be " - "installed"), (dtype, fstr)), matching) - - def __installed_unsatisfied_parent_deps(self, excludes, - ignore_inst_parent_deps): - """If we're a child image then we need to relax packages - that are dependent upon themselves in the parent image. This - is necessary to keep those packages in sync.""" - - relax_pkgs = set() - - # check if we're a child image. - if not self.__is_child(): - return relax_pkgs - - # if we're ignoring parent dependencies there is no reason to - # relax install-holds in packages constrained by those - # dependencies. - if ignore_inst_parent_deps: - return relax_pkgs - - for f in self.__installed_fmris: - for da in self.__get_dependency_actions(f, excludes): - if da.attrs["type"] != "parent": - continue - self.__linked_pkgs.add(f) - - if (pkg.actions.depend.DEPEND_SELF - not in da.attrlist("fmri")): - continue - - # We intentionally do not rely on 'insync' state - # as a change in facets/variants may result in - # changed parent constraints. - pf = self.__parent_dict.get(f.pkg_name) - if pf != f: - # We only need to relax packages that - # don't match the parent. - relax_pkgs.add(f.pkg_name) - break - - return relax_pkgs - - def __generate_dependency_errors(self, fmri_list, excludes=EmptyI): - """ Returns a list of strings describing why fmris cannot - be installed, or returns an empty list if installation - is possible. """ - ret = [] - - needs_processing = set(fmri_list) - already_processed = set() - already_seen = set() - - while needs_processing: - fmri = needs_processing.pop() - errors, newfmris = self.__do_error_work(fmri, - excludes, already_seen) - ret.extend(errors) - already_processed.add(fmri) - needs_processing |= newfmris - already_processed - return ret - - def get_trim_errors(self): - """Returns a list of strings for all FMRIs evaluated by the - solver explaining why they were rejected. (All packages - found in solver's trim database.)""" - - # At a minimum, a solve_*() method must have been called first. 
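collapse_fmris condenses a run of consecutive versions that were all rejected for the same reason into "first ... to ... last" unless verbose output was requested. A loose stand-alone illustration of that rendering decision, not the exact logic:

def collapse(versions, verbose=False):
    """Render a run of equally-rejected versions: everything when
    verbose or the run is short, otherwise only the first and last
    entries joined by 'to'."""
    if verbose or len(versions) <= 2:
        return list(versions)
    return [versions[0], "  to", versions[-1]]

print(collapse(["foo@1.0", "foo@1.1", "foo@1.2", "foo@1.3"]))
# ['foo@1.0', '  to', 'foo@1.3']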
- assert self.__state != SOLVER_INIT - # Value 'DebugValues' is unsubscriptable; - # pylint: disable=E1136 - assert DebugValues["plan"] - - return self.__fmri_list_errors(six.iterkeys(self.__trim_dict), - already_seen=set(), verbose=True) - - def __check_installed(self): - """Generate list of strings describing why currently - installed packages cannot be installed, or empty list""" - - # Used to de-dup errors. - already_seen = set() - - ret = [] - for f in self.__installed_fmris - self.__removal_fmris: - matching = self.__comb_newer_fmris(f, dotrim=True, - obsolete_ok=True)[0] - if matching: - continue - # no matches when disallowed packages are excluded - matching = self.__comb_newer_fmris(f, dotrim=False, - obsolete_ok=True)[0] - - ret.append(_("No suitable version of installed package " - "{0} found").format(f.pkg_name)) - ret.extend(self.__fmri_list_errors(matching, - already_seen=already_seen)) - - return ret - - def __fmri_list_errors(self, fmri_list, indent="", already_seen=None, - omit=None, verbose=False): - """Given a list of FMRIs, return indented strings indicating why - they were rejected.""" - ret = [] - - if omit is None: - omit = set() - - fmri_reasons = [] - skey = operator.attrgetter('pkg_name') - for f in sorted(fmri_list, key=skey): - res = self.__fmri_errors(f, indent, - already_seen=already_seen, omit=omit, - verbose=verbose) - # If None was returned, that implies that all of the - # reasons the FMRI was rejected aren't interesting. - if res is not None: - fmri_reasons.append(res) - - last_run = [] - def collapse_fmris(): - """Collapse a range of FMRIs into format: - - first_fmri - to - last_fmri - - ...based on verbose state.""" - - if last_run: - indent = last_run.pop(0) - if verbose or len(last_run) <= 1: - ret.extend(last_run) - elif (not self.__depend_ts and - ret[-1].endswith(last_run[-1].strip())): - # If timestamps are not being displayed - # and the last FMRI is the same as the - # first in the range then we only need - # to show the first. - pass - else: - ret.append(indent + " " + _("to")) - ret.append(last_run[-1]) - last_run[::] = [] - - last_reason = None - for fmri_id, reason in fmri_reasons: - if reason == last_reason: - indent = " " * len(fmri_id[0]) - if not last_run: - last_run.append(indent) - last_run.append(indent + fmri_id[1]) - continue - else: # ends run - collapse_fmris() - if last_reason: - ret.extend(last_reason) - ret.append(fmri_id[0] + fmri_id[1]) - last_reason = reason + ret.append(indent + " " + _("to")) + ret.append(last_run[-1]) + last_run[::] = [] + + last_reason = None + for fmri_id, reason in fmri_reasons: + if reason == last_reason: + indent = " " * len(fmri_id[0]) + if not last_run: + last_run.append(indent) + last_run.append(indent + fmri_id[1]) + continue + else: # ends run + collapse_fmris() if last_reason: - collapse_fmris() - ret.extend(last_reason) - return ret - - def __fmri_errors(self, fmri, indent="", already_seen=None, - omit=None, verbose=False): - """return a list of strings w/ indents why this fmri is not - suitable""" - - if already_seen is None: - already_seen = set() - if omit is None: - omit = set() - - fmri_id = [_("{0} Reject: ").format(indent)] - if not verbose and not self.__depend_ts: - # Exclude build and timestamp for brevity. - fmri_id.append(fmri.get_short_fmri()) - else: - # Include timestamp for clarity if any dependency - # included a timestamp; exclude build for brevity. 
- fmri_id.append(fmri.get_fmri(include_build=False)) - - tag = _("Reason:") - - if fmri in already_seen: - if fmri in omit: - return - - # note to translators: 'indent' will be a series of - # whitespaces. - reason = _("{indent} {tag} [already rejected; see " - "above]").format(indent=indent, tag=tag) - return fmri_id, [reason] - - already_seen.add(fmri) - - if not verbose: - # By default, omit packages from errors that were only - # rejected due to a newer version being installed, or - # because they didn't match user-specified input. It's - # tempting to omit _TRIM_REJECT here as well, but that - # leads to some very mysterious errors for - # administrators if the only reason an operation failed - # is because a required dependency was rejected. - for reason_id, reason_t, fmris in \ - self.__trim_dict.get(fmri, EmptyI): - if reason_id not in (_TRIM_INSTALLED_NEWER, - _TRIM_PROPOSED_PUB, _TRIM_PROPOSED_VER): - break - else: - omit.add(fmri) - return - - ms = [] - for reason_id, reason_t, fmris in sorted( - self.__trim_dict.get(fmri, EmptyI)): - - if not verbose: - if reason_id in (_TRIM_INSTALLED_NEWER, - _TRIM_PROPOSED_PUB, _TRIM_PROPOSED_VER): - continue - - if isinstance(reason_t, tuple): - reason = _(reason_t[0]).format(*reason_t[1]) - else: - reason = _(reason_t) - - ms.append("{0} {1} {2}".format(indent, tag, reason)) - - if reason in already_seen: - # If we've already explained why something was - # rejected before, skip it. - continue - - # Use the reason text and not the id, as the text is - # specific to a particular rejection. - already_seen.add(reason) - - # By default, don't include error output for - # dependencies on incorporation packages that don't - # specify a version since any version-specific - # dependencies will have caused a rejection elsewhere. - if (not verbose and - reason_id == _TRIM_DEP_TRIMMED and - len(reason_t[1]) == 2): - dtype, fstr = reason_t[1] - if dtype == "require" and "@" not in fstr: - # Assumes fstr does not include - # publisher or scheme. - if fstr in self.__known_incs: - continue - - # Add the reasons why each package version that - # satisfied a dependency was rejected. - res = self.__fmri_list_errors([ - f - for f in sorted(fmris) - if f not in already_seen - if verbose or f not in omit - ], - indent + " ", - already_seen=already_seen, - omit=omit, - verbose=verbose - ) - - if res: - ms.append(indent + " " + ("-" * 40)) - ms.extend(res) - ms.append(indent + " " + ("-" * 40)) - - return fmri_id, ms - - def __do_error_work(self, fmri, excludes, already_seen): - """Private helper function used by __generate_dependency_errors - to determine why packages were rejected.""" - - needs_processing = set() - - if self.__trim_dict.get(fmri): - return self.__fmri_list_errors([fmri], - already_seen=already_seen), needs_processing + ret.extend(last_reason) + ret.append(fmri_id[0] + fmri_id[1]) + last_reason = reason + if last_reason: + collapse_fmris() + ret.extend(last_reason) + return ret + + def __fmri_errors( + self, fmri, indent="", already_seen=None, omit=None, verbose=False + ): + """return a list of strings w/ indents why this fmri is not + suitable""" + + if already_seen is None: + already_seen = set() + if omit is None: + omit = set() + + fmri_id = [_("{0} Reject: ").format(indent)] + if not verbose and not self.__depend_ts: + # Exclude build and timestamp for brevity. + fmri_id.append(fmri.get_short_fmri()) + else: + # Include timestamp for clarity if any dependency + # included a timestamp; exclude build for brevity. 
+ fmri_id.append(fmri.get_fmri(include_build=False)) + + tag = _("Reason:") + + if fmri in already_seen: + if fmri in omit: + return + + # note to translators: 'indent' will be a series of + # whitespaces. + reason = _( + "{indent} {tag} [already rejected; see " "above]" + ).format(indent=indent, tag=tag) + return fmri_id, [reason] + + already_seen.add(fmri) + + if not verbose: + # By default, omit packages from errors that were only + # rejected due to a newer version being installed, or + # because they didn't match user-specified input. It's + # tempting to omit _TRIM_REJECT here as well, but that + # leads to some very mysterious errors for + # administrators if the only reason an operation failed + # is because a required dependency was rejected. + for reason_id, reason_t, fmris in self.__trim_dict.get( + fmri, EmptyI + ): + if reason_id not in ( + _TRIM_INSTALLED_NEWER, + _TRIM_PROPOSED_PUB, + _TRIM_PROPOSED_VER, + ): + break + else: + omit.add(fmri) + return + + ms = [] + for reason_id, reason_t, fmris in sorted( + self.__trim_dict.get(fmri, EmptyI) + ): + if not verbose: + if reason_id in ( + _TRIM_INSTALLED_NEWER, + _TRIM_PROPOSED_PUB, + _TRIM_PROPOSED_VER, + ): + continue + + if isinstance(reason_t, tuple): + reason = _(reason_t[0]).format(*reason_t[1]) + else: + reason = _(reason_t) + + ms.append("{0} {1} {2}".format(indent, tag, reason)) + + if reason in already_seen: + # If we've already explained why something was + # rejected before, skip it. + continue + + # Use the reason text and not the id, as the text is + # specific to a particular rejection. + already_seen.add(reason) + + # By default, don't include error output for + # dependencies on incorporation packages that don't + # specify a version since any version-specific + # dependencies will have caused a rejection elsewhere. + if ( + not verbose + and reason_id == _TRIM_DEP_TRIMMED + and len(reason_t[1]) == 2 + ): + dtype, fstr = reason_t[1] + if dtype == "require" and "@" not in fstr: + # Assumes fstr does not include + # publisher or scheme. + if fstr in self.__known_incs: + continue + + # Add the reasons why each package version that + # satisfied a dependency was rejected. 
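The non-verbose path in __fmri_errors uses Python's for/else to omit packages whose every rejection is uninteresting (a newer version is already installed, or the candidate simply did not match the proposed publisher/version). The idiom in isolation, with hypothetical reason ids:

BORING = {"installed-newer", "proposed-pub", "proposed-ver"}

def is_interesting(rejections):
    """Return True if at least one rejection reason is worth showing;
    the else branch of the for loop runs only when the loop never
    breaks, i.e. when every reason was boring."""
    for reason_id in rejections:
        if reason_id not in BORING:
            break
    else:
        return False
    return True

assert not is_interesting(["installed-newer", "proposed-ver"])
assert is_interesting(["installed-newer", "dep-trimmed"])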
+ res = self.__fmri_list_errors( + [ + f + for f in sorted(fmris) + if f not in already_seen + if verbose or f not in omit + ], + indent + " ", + already_seen=already_seen, + omit=omit, + verbose=verbose, + ) + + if res: + ms.append(indent + " " + ("-" * 40)) + ms.extend(res) + ms.append(indent + " " + ("-" * 40)) + + return fmri_id, ms + + def __do_error_work(self, fmri, excludes, already_seen): + """Private helper function used by __generate_dependency_errors + to determine why packages were rejected.""" + + needs_processing = set() + + if self.__trim_dict.get(fmri): + return ( + self.__fmri_list_errors([fmri], already_seen=already_seen), + needs_processing, + ) + + for a in self.__get_dependency_actions(fmri, excludes): + try: + matching = self.__parse_dependency(a, fmri, check_req=True)[1] + except DependencyException as e: + self.__trim((fmri,), e.reason_id, e.reason, fmri_adds=e.fmris) + s = _( + "No suitable version of required package " "{0} found:" + ).format(fmri.pkg_name) + return ( + [s] + + self.__fmri_list_errors( + [fmri], already_seen=already_seen + ), + set(), + ) + needs_processing |= matching + return [], needs_processing + + # clause generation routines + def __gen_dependency_clauses(self, fmri, da, dotrim=True): + """Return clauses to implement this dependency""" + nm, m, cond, dtype, _req, _depf = self.__parse_dependency( + da, fmri, dotrim + ) + + if dtype == "require" or dtype == "require-any": + return self.__gen_require_clauses(fmri, m) + elif dtype == "group" or dtype == "group-any": + if not m: + return [] # no clauses needed; pkg avoided + else: + return self.__gen_require_clauses(fmri, m) + elif dtype == "conditional": + return self.__gen_require_conditional_clauses(fmri, m, cond) + elif dtype in ["origin", "parent"]: + # handled by trimming proposed set, not by solver + return [] + else: + return self.__gen_negation_clauses(fmri, nm) + + def __gen_highlander_clauses(self, fmri_list): + """Return a list of clauses that specifies only one or zero + of the fmris in fmri_list may be installed. 
This prevents + multiple versions of the same package being installed + at once""" + + # pair wise negation + # if a has 4 versions, we need + # [ + # [-a.1, -a.2], + # [-a.1, -a.3], + # [-a.1, -a.4], + # [-a.2, -a.3], + # [-a.2, -a.4], + # [-a.3, -a.4] + # ] + # n*(n-1)/2 algorithms suck + + if len(fmri_list) == 1: # avoid generation of singletons + return [] + + id_list = [-self.__getid(fmri) for fmri in fmri_list] + l = len(id_list) + + return [ + [id_list[i], id_list[j]] + for i in range(l - 1) + for j in range(i + 1, l) + ] + + def __gen_require_clauses(self, fmri, matching_fmri_list): + """generate clause for require dependency: if fmri is + installed, one of fmri_list is required""" + # if a.1 requires b.2, b.3 or b.4: + # !a.1 | b.2 | b.3 | b.4 + + return [ + [-self.__getid(fmri)] + + [self.__getid(fmri) for fmri in matching_fmri_list] + ] + + def __gen_require_conditional_clauses( + self, fmri, matching_fmri_list, conditional_fmri_list + ): + """Generate clauses for conditional dependency: if + fmri is installed and one of conditional_fmri_list is installed, + one of fmri list is required""" + # if a.1 requires c.2, c.3, c.4 if b.2 or newer is installed: + # !a.1 | !b.2 | c.2 | c.3 | c.4 + # !a.1 | !b.3 | c.2 | c.3 | c.4 + mlist = [self.__getid(f) for f in matching_fmri_list] + + return [ + [-self.__getid(fmri)] + [-self.__getid(c)] + mlist + for c in conditional_fmri_list + ] + + def __gen_negation_clauses(self, fmri, non_matching_fmri_list): + """generate clauses for optional, incorporate and + exclude dependencies to exclude non-acceptable versions""" + # if present, fmri must match ok list + # if a.1 optionally requires b.3: + # [ + # [!a.1 | !b.1], + # [!a.1 | !b.2] + # ] + fmri_id = self.__getid(fmri) + return [[-fmri_id, -self.__getid(f)] for f in non_matching_fmri_list] + + def __gen_one_of_these_clauses(self, fmri_list): + """generate clauses such that at least one of the fmri_list + members gets installed""" + # If a has four versions, + # a.1|a.2|a.3|a.4 + # plus highlander clauses + assert fmri_list, "Empty list of which one is required" + return [[self.__getid(fmri) for fmri in fmri_list]] + + def __addclauses(self, clauses): + """add list of clause lists to solver""" + + for c in clauses: + try: + if not self.__solver.add_clause(c): + self.__addclause_failure = True + self.__clauses += 1 + except TypeError: + raise TypeError( + _("List of integers, not {0}, " "expected").format(c) + ) - for a in self.__get_dependency_actions(fmri, excludes): - try: - matching = self.__parse_dependency(a, fmri, - check_req=True)[1] - except DependencyException as e: - self.__trim((fmri,), e.reason_id, e.reason, - fmri_adds=e.fmris) - s = _("No suitable version of required package " - "{0} found:").format(fmri.pkg_name) - return ([s] + self.__fmri_list_errors([fmri], - already_seen=already_seen), - set()) - needs_processing |= matching - return [], needs_processing - - # clause generation routines - def __gen_dependency_clauses(self, fmri, da, dotrim=True): - """Return clauses to implement this dependency""" - nm, m, cond, dtype, _req, _depf = self.__parse_dependency(da, - fmri, dotrim) - - if dtype == "require" or dtype == "require-any": - return self.__gen_require_clauses(fmri, m) - elif dtype == "group" or dtype == "group-any": - if not m: - return [] # no clauses needed; pkg avoided - else: - return self.__gen_require_clauses(fmri, m) - elif dtype == "conditional": - return self.__gen_require_conditional_clauses(fmri, m, - cond) - elif dtype in ["origin", "parent"]: - # handled by 
trimming proposed set, not by solver - return [] + def __get_child_holds(self, install_holds, pkg_cons, inc_set): + """Returns the list of installed packages that are incorporated + by packages, delivering an install-hold, and that do not have an + install-hold but incorporate packages. + + 'install_holds' is a dict of installed package stems indicating + the pkg.depend.install-hold delivered by the package that are + not being removed. + + 'pkg_cons' is a dict of installed package fmris and the + incorporate constraints they deliver. + + 'inc_set' is a list of packages that incorporate other packages + and deliver install-hold actions. It acts as the starting point + where we fan out to find "child" packages that incorporate other + packages.""" + + unprocessed = set(inc_set) + processed = set() + proc_cons = set() + incorps = set() + + while unprocessed: + self.__progress() + ifmri = unprocessed.pop() + processed.add(ifmri) + + if ifmri in self.__removal_fmris: + # This package will be removed, so + # nothing to do. + continue + + cons = pkg_cons.get(ifmri, []) + if cons and ifmri.pkg_name not in install_holds: + # If this package incorporates other + # packages and does not deliver an + # install-hold, then consider it a + # 'child' hold. + incorps.add(ifmri) + + # Find all incorporation constraints that result + # in only one possible match. If there is only + # one possible match for an incorporation + # constraint then that package will not be + # upgraded and should be checked for + # incorporation constraints. + for con in cons: + if con.pkg_name in install_holds or con in proc_cons: + # Already handled. + continue + matching = list(self.__comb_auto_fmris(con)[0]) + if len(matching) == 1: + if matching[0] not in processed: + unprocessed.add(matching[0]) else: - return self.__gen_negation_clauses(fmri, nm) - - def __gen_highlander_clauses(self, fmri_list): - """Return a list of clauses that specifies only one or zero - of the fmris in fmri_list may be installed. 
This prevents - multiple versions of the same package being installed - at once""" - - # pair wise negation - # if a has 4 versions, we need - # [ - # [-a.1, -a.2], - # [-a.1, -a.3], - # [-a.1, -a.4], - # [-a.2, -a.3], - # [-a.2, -a.4], - # [-a.3, -a.4] - # ] - # n*(n-1)/2 algorithms suck - - if len(fmri_list) == 1: # avoid generation of singletons - return [] - - id_list = [ -self.__getid(fmri) for fmri in fmri_list] - l = len(id_list) - - return [ - [id_list[i], id_list[j]] - for i in range(l-1) - for j in range(i+1, l) - ] - - def __gen_require_clauses(self, fmri, matching_fmri_list): - """generate clause for require dependency: if fmri is - installed, one of fmri_list is required""" - # if a.1 requires b.2, b.3 or b.4: - # !a.1 | b.2 | b.3 | b.4 - - return [ - [-self.__getid(fmri)] + - [self.__getid(fmri) for fmri in matching_fmri_list] - ] - - def __gen_require_conditional_clauses(self, fmri, matching_fmri_list, - conditional_fmri_list): - """Generate clauses for conditional dependency: if - fmri is installed and one of conditional_fmri_list is installed, - one of fmri list is required""" - # if a.1 requires c.2, c.3, c.4 if b.2 or newer is installed: - # !a.1 | !b.2 | c.2 | c.3 | c.4 - # !a.1 | !b.3 | c.2 | c.3 | c.4 - mlist = [self.__getid(f) for f in matching_fmri_list] - - return [ - [-self.__getid(fmri)] + [-self.__getid(c)] + mlist - for c in conditional_fmri_list - ] - - def __gen_negation_clauses(self, fmri, non_matching_fmri_list): - """ generate clauses for optional, incorporate and - exclude dependencies to exclude non-acceptable versions""" - # if present, fmri must match ok list - # if a.1 optionally requires b.3: - # [ - # [!a.1 | !b.1], - # [!a.1 | !b.2] - # ] - fmri_id = self.__getid(fmri) - return [ - [-fmri_id, -self.__getid(f)] - for f in non_matching_fmri_list - ] - - def __gen_one_of_these_clauses(self, fmri_list): - """generate clauses such that at least one of the fmri_list - members gets installed""" - # If a has four versions, - # a.1|a.2|a.3|a.4 - # plus highlander clauses - assert fmri_list, "Empty list of which one is required" - return [[self.__getid(fmri) for fmri in fmri_list]] - - def __addclauses(self, clauses): - """add list of clause lists to solver""" - - for c in clauses: + # Track which constraints have + # already been processed + # seperately from which + # package FMRIs have been + # processed to avoid (unlikely) + # collision. 
+ proc_cons.add(con) + + return incorps + + def __get_installed_upgradeable_incorps(self, excludes=EmptyI): + """Return the latest version of installed upgradeable + incorporations w/ install holds""" + + installed_incs = [] + for f in self.__installed_fmris - self.__removal_fmris: + for d in self.__catalog.get_entry_actions( + f, [catalog.Catalog.DEPENDENCY], excludes=excludes + ): + if ( + d.name == "set" + and d.attrs["name"] == "pkg.depend.install-hold" + ): + installed_incs.append(f) + + ret = [] + for f in installed_incs: + matching = self.__comb_newer_fmris(f, dotrim=False)[0] + latest = sorted(matching, reverse=True)[0] + if latest != f: + ret.append(latest) + return ret + + def __get_installed_unbound_inc_list(self, proposed_pkgs, excludes=EmptyI): + """Return the list of incorporations that are to not to change + during this install operation, and the lists of fmris they + constrain.""" + + incorps = set() + versioned_dependents = set() + pkg_cons = {} + install_holds = {} + + # Determine installed packages that contain incorporation + # dependencies, those packages that are depended on by explict + # version, and those that have pkg.depend.install-hold values. + for f in self.__installed_fmris - self.__removal_fmris: + for d in self.__catalog.get_entry_actions( + f, [catalog.Catalog.DEPENDENCY], excludes=excludes + ): + if d.name == "depend": + fmris = [] + for fl in d.attrlist("fmri"): try: - if not self.__solver.add_clause(c): - self.__addclause_failure = True - self.__clauses += 1 - except TypeError: - raise TypeError(_("List of integers, not {0}, " - "expected").format(c)) - - def __get_child_holds(self, install_holds, pkg_cons, inc_set): - """Returns the list of installed packages that are incorporated - by packages, delivering an install-hold, and that do not have an - install-hold but incorporate packages. - - 'install_holds' is a dict of installed package stems indicating - the pkg.depend.install-hold delivered by the package that are - not being removed. - - 'pkg_cons' is a dict of installed package fmris and the - incorporate constraints they deliver. - - 'inc_set' is a list of packages that incorporate other packages - and deliver install-hold actions. It acts as the starting point - where we fan out to find "child" packages that incorporate other - packages.""" - - unprocessed = set(inc_set) - processed = set() - proc_cons = set() - incorps = set() - - while unprocessed: - self.__progress() - ifmri = unprocessed.pop() - processed.add(ifmri) - - if ifmri in self.__removal_fmris: - # This package will be removed, so - # nothing to do. - continue - - cons = pkg_cons.get(ifmri, []) - if cons and ifmri.pkg_name not in install_holds: - # If this package incorporates other - # packages and does not deliver an - # install-hold, then consider it a - # 'child' hold. - incorps.add(ifmri) - - # Find all incorporation constraints that result - # in only one possible match. If there is only - # one possible match for an incorporation - # constraint then that package will not be - # upgraded and should be checked for - # incorporation constraints. - for con in cons: - if (con.pkg_name in install_holds or - con in proc_cons): - # Already handled. - continue - matching = list( - self.__comb_auto_fmris(con)[0]) - if len(matching) == 1: - if matching[0] not in processed: - unprocessed.add(matching[0]) - else: - # Track which constraints have - # already been processed - # seperately from which - # package FMRIs have been - # processed to avoid (unlikely) - # collision. 
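The __gen_*_clauses helpers earlier in this hunk encode dependencies as CNF clauses over per-FMRI integer variables, where a negative literal means "not installed". A self-contained sketch of the two simplest encodings, the pairwise "highlander" constraint and the require clause:

def highlander_clauses(ids):
    """At most one of the variables may be true: one clause
    [-a, -b] for every pair, n*(n-1)/2 clauses in total."""
    return [
        [-ids[i], -ids[j]]
        for i in range(len(ids) - 1)
        for j in range(i + 1, len(ids))
    ]

def require_clause(pkg_id, dep_ids):
    """If the package is installed, one version of its dependency must
    be installed too: !pkg | dep1 | dep2 | ..."""
    return [[-pkg_id] + list(dep_ids)]

# Package 1 requires one of {2, 3}; at most one of {2, 3} may be chosen.
clauses = require_clause(1, [2, 3]) + highlander_clauses([2, 3])
print(clauses)  # [[-1, 2, 3], [-2, -3]]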
- proc_cons.add(con) - - return incorps - - def __get_installed_upgradeable_incorps(self, excludes=EmptyI): - """Return the latest version of installed upgradeable - incorporations w/ install holds""" - - installed_incs = [] - for f in self.__installed_fmris - self.__removal_fmris: - for d in self.__catalog.get_entry_actions(f, - [catalog.Catalog.DEPENDENCY], excludes=excludes): - if (d.name == "set" and d.attrs["name"] == - "pkg.depend.install-hold"): - installed_incs.append(f) - - ret = [] - for f in installed_incs: - matching = self.__comb_newer_fmris(f, dotrim=False)[0] - latest = sorted(matching, reverse=True)[0] - if latest != f: - ret.append(latest) - return ret - - def __get_installed_unbound_inc_list(self, proposed_pkgs, - excludes=EmptyI): - """Return the list of incorporations that are to not to change - during this install operation, and the lists of fmris they - constrain.""" - - incorps = set() - versioned_dependents = set() - pkg_cons = {} - install_holds = {} - - # Determine installed packages that contain incorporation - # dependencies, those packages that are depended on by explict - # version, and those that have pkg.depend.install-hold values. - for f in self.__installed_fmris - self.__removal_fmris: - for d in self.__catalog.get_entry_actions(f, - [catalog.Catalog.DEPENDENCY], - excludes=excludes): - if d.name == "depend": - fmris = [] - for fl in d.attrlist("fmri"): - try: - tmp = self.__fmridict[ - fl] - except KeyError: - tmp = pkg.fmri.PkgFmri( - fl) - self.__fmridict[fl] = \ - tmp - fmris.append(tmp) - if d.attrs["type"] == "incorporate": - incorps.add(f.pkg_name) - pkg_cons.setdefault(f, - []).append(fmris[0]) - versioned_dependents.update( - fmri.pkg_name - for fmri in fmris - if fmri.version is not None - ) - elif (d.name == "set" and d.attrs["name"] == - "pkg.depend.install-hold"): - install_holds[f.pkg_name] = \ - d.attrs["value"] - - # find install holds that appear on command line and are thus - # relaxed - relaxed_holds = set([ - install_holds[name] - for name in proposed_pkgs - if name in install_holds - ]) - - # add any other install holds that are relaxed because they have - # values that start w/ the relaxed ones... - relaxed_holds |= set([ - hold - for hold in six.itervalues(install_holds) - if [ r for r in relaxed_holds if hold.startswith(r + ".") ] - ]) - - # Expand the list of install holds to include packages that are - # incorporated by packages delivering an install-hold and that - # do not have an install-hold, but incorporate packages. - child_holds = self.__get_child_holds(install_holds, pkg_cons, - set(inc for inc in pkg_cons - if inc.pkg_name in install_holds and - install_holds[inc.pkg_name] not in relaxed_holds + tmp = self.__fmridict[fl] + except KeyError: + tmp = pkg.fmri.PkgFmri(fl) + self.__fmridict[fl] = tmp + fmris.append(tmp) + if d.attrs["type"] == "incorporate": + incorps.add(f.pkg_name) + pkg_cons.setdefault(f, []).append(fmris[0]) + versioned_dependents.update( + fmri.pkg_name + for fmri in fmris + if fmri.version is not None ) + elif ( + d.name == "set" + and d.attrs["name"] == "pkg.depend.install-hold" + ): + install_holds[f.pkg_name] = d.attrs["value"] + + # find install holds that appear on command line and are thus + # relaxed + relaxed_holds = set( + [ + install_holds[name] + for name in proposed_pkgs + if name in install_holds + ] + ) + + # add any other install holds that are relaxed because they have + # values that start w/ the relaxed ones... 
+ relaxed_holds |= set( + [ + hold + for hold in six.itervalues(install_holds) + if [r for r in relaxed_holds if hold.startswith(r + ".")] + ] + ) + + # Expand the list of install holds to include packages that are + # incorporated by packages delivering an install-hold and that + # do not have an install-hold, but incorporate packages. + child_holds = self.__get_child_holds( + install_holds, + pkg_cons, + set( + inc + for inc in pkg_cons + if inc.pkg_name in install_holds + and install_holds[inc.pkg_name] not in relaxed_holds + ), + ) + + for child_hold in child_holds: + assert child_hold.pkg_name not in install_holds + install_holds[child_hold.pkg_name] = child_hold.pkg_name + + # versioned_dependents contains all the packages that are + # depended on w/ a explicit version. We now modify this list so + # that it does not contain any packages w/ install_holds, unless + # those holds were relaxed. + versioned_dependents -= set( + [ + pkg_name + for pkg_name, hold_value in six.iteritems(install_holds) + if hold_value not in relaxed_holds + ] + ) + # Build the list of fmris that 1) contain incorp. dependencies + # 2) are not in the set of versioned_dependents and 3) do not + # explicitly appear on the install command line. + installed_dict = self.__installed_dict + ret = [ + installed_dict[pkg_name] + for pkg_name in incorps - versioned_dependents + if pkg_name not in proposed_pkgs + if installed_dict[pkg_name] not in self.__removal_fmris + ] + # For each incorporation above that will not change, return a + # list of the fmris that incorporation constrains + con_lists = [[i for i in pkg_cons[inc]] for inc in ret] + + return ret, con_lists + + def __mark_pub_trimmed(self, pkg_name): + """Record that a given package stem has been trimmed based on + publisher.""" + + self.__pub_trim[pkg_name] = True + + def __filter_publishers(self, pkg_name): + """Given a list of fmris for various versions of + a package from various publishers, trim those + that are not suitable""" + + if pkg_name in self.__pub_trim: # already done + return + self.__mark_pub_trimmed(pkg_name) + + fmri_list = self.__get_catalog_fmris(pkg_name) + + if pkg_name in self.__publisher: + acceptable_pubs = [self.__publisher[pkg_name]] + if pkg_name in self.__installed_dict: + reason_id = _TRIM_PUB_STICKY + reason = ( + N_( + "Currently installed package " + "'{0}' is from sticky publisher '{1}'." + ), + (pkg_name, self.__publisher[pkg_name]), ) - - for child_hold in child_holds: - assert child_hold.pkg_name not in install_holds - install_holds[child_hold.pkg_name] = child_hold.pkg_name - - # versioned_dependents contains all the packages that are - # depended on w/ a explicit version. We now modify this list so - # that it does not contain any packages w/ install_holds, unless - # those holds were relaxed. - versioned_dependents -= set([ - pkg_name - for pkg_name, hold_value in six.iteritems(install_holds) - if hold_value not in relaxed_holds - ]) - # Build the list of fmris that 1) contain incorp. dependencies - # 2) are not in the set of versioned_dependents and 3) do not - # explicitly appear on the install command line. 
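Install-holds are relaxed in two steps: holds delivered by packages named in the operation are relaxed directly, and any hold value that extends a relaxed hold with a "."-separated suffix is relaxed as well. A compact sketch with made-up stem and hold names:

def relaxed(install_holds, proposed_stems):
    """install_holds: {stem: hold-value} delivered by installed packages.
    proposed_stems:   package stems named in the operation.
    Returns the set of hold values that no longer pin anything."""
    base = {install_holds[s] for s in proposed_stems if s in install_holds}
    return base | {
        hold
        for hold in install_holds.values()
        if any(hold.startswith(r + ".") for r in base)
    }

holds = {"entire": "core-os", "osnet-incorporation": "core-os.osnet"}
assert relaxed(holds, {"entire"}) == {"core-os", "core-os.osnet"}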
- installed_dict = self.__installed_dict - ret = [ - installed_dict[pkg_name] - for pkg_name in incorps - versioned_dependents - if pkg_name not in proposed_pkgs - if installed_dict[pkg_name] not in self.__removal_fmris - ] - # For each incorporation above that will not change, return a - # list of the fmris that incorporation constrains - con_lists = [ - [ i for i in pkg_cons[inc] ] - for inc in ret + else: + reason_id = _TRIM_PROPOSED_PUB + reason = N_( + "Package is from publisher other " "than specified one." + ) + else: + # order by pub_rank; choose highest possible tier for + # pkgs; guard against unconfigured publishers in known + # catalog + pubs_found = set((f.publisher for f in fmri_list)) + ranked = sorted( + [ + (self.__pub_ranks[p][0], p) + for p in pubs_found + if self.__pub_ranks.get(p, (0, False, False))[2] ] + ) + acceptable_pubs = [r[1] for r in ranked if r[0] == ranked[0][0]] + reason_id = _TRIM_PUB_RANK + if acceptable_pubs: + reason = ( + N_("Higher ranked publisher {0} was " "selected"), + (acceptable_pubs[0],), + ) + else: + reason = N_( + "Package publisher is ranked lower " "in search order" + ) - return ret, con_lists - - def __mark_pub_trimmed(self, pkg_name): - """Record that a given package stem has been trimmed based on - publisher.""" - - self.__pub_trim[pkg_name] = True - - def __filter_publishers(self, pkg_name): - """Given a list of fmris for various versions of - a package from various publishers, trim those - that are not suitable""" - - if pkg_name in self.__pub_trim: # already done - return - self.__mark_pub_trimmed(pkg_name) - - fmri_list = self.__get_catalog_fmris(pkg_name) - - if pkg_name in self.__publisher: - acceptable_pubs = [self.__publisher[pkg_name]] - if pkg_name in self.__installed_dict: - reason_id = _TRIM_PUB_STICKY - reason = (N_("Currently installed package " - "'{0}' is from sticky publisher '{1}'."), - (pkg_name, self.__publisher[pkg_name])) - else: - reason_id = _TRIM_PROPOSED_PUB - reason = N_("Package is from publisher other " - "than specified one.") + # allow installed packages to co-exist to meet dependency reqs. + # in case new publisher not proper superset of original. avoid + # multiple publishers w/ the exact same fmri to prevent + # thrashing in the solver due to many equiv. solutions. + inst_f = self.__installed_dict.get(pkg_name) + self.__trim( + [ + f + for f in fmri_list + if ( + f.publisher not in acceptable_pubs + and (not inst_f or f != inst_f) + ) + or ( + inst_f + and f.publisher != inst_f.publisher + and f.version == inst_f.version + ) + ], + reason_id, + reason, + ) + + # routines to manage the trim dictionary + # trim dictionary contains the reasons an fmri was rejected for + # consideration reason is a tuple of a string w/ format chars and args, + # or just a string. fmri_adds are any fmris that caused the rejection + + def __trim(self, fmri_list, reason_id, reason, fmri_adds=EmptyI): + """Remove specified fmri(s) from consideration for specified + reason.""" + + self.__progress() + assert reason_id in range(_TRIM_MAX) + + # XXX - determine whether this block is still necessary + # - was introduced in 51ff33eef0f3fdaf9954afeeabedd3f842008b50 + # and subsequently moved. + + # There's ugly issue when we receive reason having set of fmris + # with require-any dependencies, which differentiate only by + # timestamp. In this case add operation fails. The workaround + # is to add only the first dependency to the trim dictionary. 
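When a stem is not pinned to a sticky publisher, __filter_publishers keeps only FMRIs from the best-ranked configured publishers that actually offer the package. A reduced sketch of that ranking step, assuming lower rank numbers are better as in __pub_ranks:

def acceptable_publishers(fmri_pubs, pub_ranks):
    """fmri_pubs: publishers that offer the package.
    pub_ranks:    {publisher: (rank, sticky, configured)} as in the
                  image configuration; unconfigured publishers found
                  only in the known catalog are skipped.
    Returns the publishers sharing the best (lowest) rank."""
    ranked = sorted(
        (pub_ranks[p][0], p)
        for p in fmri_pubs
        if pub_ranks.get(p, (0, False, False))[2]
    )
    if not ranked:
        return []
    best = ranked[0][0]
    return [p for r, p in ranked if r == best]

ranks = {"omnios": (1, True, True), "extra": (2, True, True)}
assert acceptable_publishers({"omnios", "extra"}, ranks) == ["omnios"]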
+ # if (type(reason[1]) is tuple and len(reason[1])>1 and + # type(reason[1][1]) is list and len(reason[1][1])>=1): + # reason=(reason[0],(reason[1][0],reason[1][1][0])) + + tup = (reason_id, reason, frozenset(fmri_adds)) + + for fmri in fmri_list: + self.__trim_dict[fmri].add(tup) + + def __trim_older(self, fmri): + """Trim any fmris older than this one""" + reason = (N_("Newer version {0} is already installed"), (fmri,)) + self.__trim( + self.__comb_newer_fmris(fmri, dotrim=False)[1] + - self.__allowed_downgrades, + _TRIM_INSTALLED_NEWER, + reason, + ) + + def __trim_nonmatching_variants(self, fmri): + """Trim packages that don't support image architecture or other + image variant.""" + + vd = self.__get_variant_dict(fmri) + reason = "" + + for v in self.__variants.keys(): + if v in vd and self.__variants[v] not in vd[v]: + if vd == "variant.arch": + reason = N_("Package doesn't support " "image architecture") else: - # order by pub_rank; choose highest possible tier for - # pkgs; guard against unconfigured publishers in known - # catalog - pubs_found = set((f.publisher for f in fmri_list)) - ranked = sorted([ - (self.__pub_ranks[p][0], p) - for p in pubs_found - if self.__pub_ranks.get(p, (0, False, False))[2] - ]) - acceptable_pubs = [ - r[1] - for r in ranked - if r[0] == ranked[0][0] - ] - reason_id = _TRIM_PUB_RANK - if acceptable_pubs: - reason = (N_("Higher ranked publisher {0} was " - "selected"), (acceptable_pubs[0],)) - else: - reason = N_("Package publisher is ranked lower " - "in search order") - - # allow installed packages to co-exist to meet dependency reqs. - # in case new publisher not proper superset of original. avoid - # multiple publishers w/ the exact same fmri to prevent - # thrashing in the solver due to many equiv. solutions. - inst_f = self.__installed_dict.get(pkg_name) - self.__trim([ - f - for f in fmri_list - if (f.publisher not in acceptable_pubs and - (not inst_f or f != inst_f)) or - (inst_f and f.publisher != inst_f.publisher and - f.version == inst_f.version) - ], reason_id, reason) - - # routines to manage the trim dictionary - # trim dictionary contains the reasons an fmri was rejected for - # consideration reason is a tuple of a string w/ format chars and args, - # or just a string. fmri_adds are any fmris that caused the rejection - - def __trim(self, fmri_list, reason_id, reason, fmri_adds=EmptyI): - """Remove specified fmri(s) from consideration for specified - reason.""" + reason = ( + N_( + "Package supports image " + "variant {0}={1} but doesn't " + "support this image's {0}={2}" + ), + (v, str(vd[v]), str(self.__variants[v])), + ) - self.__progress() - assert reason_id in range(_TRIM_MAX) - - # XXX - determine whether this block is still necessary - # - was introduced in 51ff33eef0f3fdaf9954afeeabedd3f842008b50 - # and subsequently moved. - - # There's ugly issue when we receive reason having set of fmris - # with require-any dependencies, which differentiate only by - # timestamp. In this case add operation fails. The workaround - # is to add only the first dependency to the trim dictionary. 
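__trim_nonmatching_variants rejects packages whose variant metadata cannot satisfy the image configuration. A stripped-down version of that check, where a package's variant dict maps variant names to the values it supports and an absent variant means "supports all":

def supports_image(pkg_variants, image_variants):
    """pkg_variants:  {variant-name: set of supported values}.
    image_variants:   {variant-name: value} configured on the image.
    Returns (ok, reason); reason names the first mismatch found."""
    for name, value in image_variants.items():
        if name in pkg_variants and value not in pkg_variants[name]:
            return False, (
                "Package supports {0}={1} but image has {0}={2}".format(
                    name, sorted(pkg_variants[name]), value
                )
            )
    return True, ""

ok, why = supports_image(
    {"variant.arch": {"i386"}},
    {"variant.arch": "sparc", "variant.opensolaris.zone": "global"},
)
assert not ok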
- #if (type(reason[1]) is tuple and len(reason[1])>1 and - # type(reason[1][1]) is list and len(reason[1][1])>=1): - # reason=(reason[0],(reason[1][0],reason[1][1][0])) - - tup = (reason_id, reason, frozenset(fmri_adds)) - - for fmri in fmri_list: - self.__trim_dict[fmri].add(tup) - - def __trim_older(self, fmri): - """Trim any fmris older than this one""" - reason = (N_("Newer version {0} is already installed"), (fmri,)) - self.__trim(self.__comb_newer_fmris(fmri, dotrim=False)[1] - - self.__allowed_downgrades, _TRIM_INSTALLED_NEWER, reason) - - def __trim_nonmatching_variants(self, fmri): - """Trim packages that don't support image architecture or other - image variant.""" - - vd = self.__get_variant_dict(fmri) - reason = "" - - for v in self.__variants.keys(): - if v in vd and self.__variants[v] not in vd[v]: - if vd == "variant.arch": - reason = N_("Package doesn't support " - "image architecture") - else: - reason = (N_("Package supports image " - "variant {0}={1} but doesn't " - "support this image's {0}={2}"), - (v, str(vd[v]), - str(self.__variants[v]))) - - self.__trim((fmri,), _TRIM_VARIANT, reason) - return reason == "" - - def __trim_nonmatching_parents1(self, pkg_fmri, fmri): - """Private helper function for __trim_nonmatching_parents that - trims any pkg_fmri that matches a parent dependency and that is - not installed in the parent image, that is from a different - publisher than the parent image, or that is a different version - than the parent image.""" - - if fmri in self.__parent_pkgs: - # exact fmri installed in parent - return True - - if fmri.pkg_name not in self.__parent_dict: - # package is not installed in parent - if self.__is_zone(): - reason = (N_("Package {0} is not installed in " - "global zone."), (fmri.pkg_name,)) - else: - reason = (N_("Package {0} is not installed in " - "parent image."), (fmri.pkg_name,)) - self.__trim((pkg_fmri,), _TRIM_PARENT_MISSING, reason) - return False + self.__trim((fmri,), _TRIM_VARIANT, reason) + return reason == "" + + def __trim_nonmatching_parents1(self, pkg_fmri, fmri): + """Private helper function for __trim_nonmatching_parents that + trims any pkg_fmri that matches a parent dependency and that is + not installed in the parent image, that is from a different + publisher than the parent image, or that is a different version + than the parent image.""" + + if fmri in self.__parent_pkgs: + # exact fmri installed in parent + return True + + if fmri.pkg_name not in self.__parent_dict: + # package is not installed in parent + if self.__is_zone(): + reason = ( + N_("Package {0} is not installed in " "global zone."), + (fmri.pkg_name,), + ) + else: + reason = ( + N_("Package {0} is not installed in " "parent image."), + (fmri.pkg_name,), + ) + self.__trim((pkg_fmri,), _TRIM_PARENT_MISSING, reason) + return False + + pf = self.__parent_dict[fmri.pkg_name] + if fmri.publisher and fmri.publisher != pf.publisher: + # package is from a different publisher in the parent + if self.__is_zone(): + reason = ( + N_( + "Package in global zone is from " + "a different publisher: {0}" + ), + (pf,), + ) + else: + reason = ( + N_( + "Package in parent is from a " + "different publisher: {0}" + ), + (pf,), + ) + self.__trim((pkg_fmri,), _TRIM_PARENT_PUB, reason) + return False + + if pf.version == fmri.version: + # parent dependency is satisfied, which applies to both + # DEPEND_SELF and other cases + return True + elif pkg_fmri != fmri and pf.version.is_successor( + fmri.version, version.CONSTRAINT_NONE + ): + # *not* DEPEND_SELF; parent 
dependency is satisfied + return True + + # version mismatch + if pf.version.is_successor(fmri.version, version.CONSTRAINT_NONE): + reason_id = _TRIM_PARENT_NEWER + if self.__is_zone(): + reason = (N_("Global zone has a " "newer version: {0}"), (pf,)) + else: + reason = (N_("Parent image has a " "newer version: {0}"), (pf,)) + else: + reason_id = _TRIM_PARENT_OLDER + if self.__is_zone(): + reason = ( + N_("Global zone has an older " "version of package: {0}"), + (pf,), + ) + else: + reason = ( + N_("Parent image has an older " "version of package: {0}"), + (pf,), + ) - pf = self.__parent_dict[fmri.pkg_name] - if fmri.publisher and fmri.publisher != pf.publisher: - # package is from a different publisher in the parent - if self.__is_zone(): - reason = (N_("Package in global zone is from " - "a different publisher: {0}"), (pf,)) - else: - reason = (N_("Package in parent is from a " - "different publisher: {0}"), (pf,)) - self.__trim((pkg_fmri,), _TRIM_PARENT_PUB, reason) + self.__trim((pkg_fmri,), reason_id, reason) + return False + + def __trim_nonmatching_parents( + self, pkg_fmri, excludes, ignore_inst_parent_deps=False + ): + """Trim any pkg_fmri that contains a parent dependency that + is not satisfied by the parent image.""" + + # the fmri for the package should include a publisher + assert pkg_fmri.publisher + + # if we're not a child then ignore "parent" dependencies. + if not self.__is_child(): + return True + + # check if we're ignoring parent dependencies for installed + # packages. + if ignore_inst_parent_deps and pkg_fmri in self.__installed_fmris: + return True + + # Find all the fmris that we depend on in our parent. + # Use a set() to eliminate any dups. + pkg_deps = set( + [ + pkg.fmri.PkgFmri(f) + for da in self.__get_dependency_actions(pkg_fmri, excludes) + if da.attrs["type"] == "parent" + for f in da.attrlist("fmri") + ] + ) + + if not pkg_deps: + # no parent dependencies. + return True + self.__linked_pkgs.add(pkg_fmri) + + allowed = True + for f in pkg_deps: + fmri = f + if f.pkg_name == pkg.actions.depend.DEPEND_SELF: + # check if this package depends on itself. 
+ fmri = pkg_fmri + if not self.__trim_nonmatching_parents1(pkg_fmri, fmri): + allowed = False + return allowed + + def __trim_nonmatching_origins( + self, fmri, excludes, exact_install=False, installed_dict_tmp=EmptyDict + ): + """Trim any fmri that contains a origin dependency that is + not satisfied by the current image or root-image""" + + for da in self.__get_dependency_actions(fmri, excludes): + if da.attrs["type"] != "origin": + continue + + req_fmri = pkg.fmri.PkgFmri(da.attrs["fmri"]) + + if da.attrs.get("root-image", "").lower() == "true": + if req_fmri.pkg_name.startswith("feature/firmware/"): + # this is a firmware dependency + fw_ok, reason = self.__firmware.check_firmware( + da, req_fmri.pkg_name + ) + if not fw_ok: + self.__trim((fmri,), _TRIM_FIRMWARE, reason) return False + continue + + if self.__root_fmris is None: + img = pkg.client.image.Image( + misc.liveroot(), + allow_ondisk_upgrade=False, + user_provided_dir=True, + should_exist=True, + ) + self.__root_fmris = dict( + [(f.pkg_name, f) for f in img.gen_installed_pkgs()] + ) - if pf.version == fmri.version: - # parent dependency is satisfied, which applies to both - # DEPEND_SELF and other cases - return True - elif (pkg_fmri != fmri and - pf.version.is_successor(fmri.version, - version.CONSTRAINT_NONE)): - # *not* DEPEND_SELF; parent dependency is satisfied - return True - - # version mismatch - if pf.version.is_successor(fmri.version, - version.CONSTRAINT_NONE): - reason_id = _TRIM_PARENT_NEWER - if self.__is_zone(): - reason = (N_("Global zone has a " - "newer version: {0}"), (pf,)) - else: - reason = (N_("Parent image has a " - "newer version: {0}"), (pf,)) + installed = self.__root_fmris.get(req_fmri.pkg_name) + reason_id = _TRIM_INSTALLED_ROOT_ORIGIN + reason = ( + N_( + "Installed version in root image " + "is too old for origin " + "dependency {0}" + ), + (req_fmri,), + ) + else: + # Always use the full installed dict for origin + # dependency. + if exact_install: + installed = installed_dict_tmp.get(req_fmri.pkg_name) else: - reason_id = _TRIM_PARENT_OLDER - if self.__is_zone(): - reason = (N_("Global zone has an older " - "version of package: {0}"), (pf,)) - else: - reason = (N_("Parent image has an older " - "version of package: {0}"), (pf,)) - - self.__trim((pkg_fmri,), reason_id, reason) - return False - - def __trim_nonmatching_parents(self, pkg_fmri, excludes, - ignore_inst_parent_deps=False): - """Trim any pkg_fmri that contains a parent dependency that - is not satisfied by the parent image.""" - - # the fmri for the package should include a publisher - assert pkg_fmri.publisher - - # if we're not a child then ignore "parent" dependencies. - if not self.__is_child(): - return True - - # check if we're ignoring parent dependencies for installed - # packages. - if ignore_inst_parent_deps and \ - pkg_fmri in self.__installed_fmris: - return True - - # Find all the fmris that we depend on in our parent. - # Use a set() to eliminate any dups. - pkg_deps = set([ - pkg.fmri.PkgFmri(f) - for da in self.__get_dependency_actions(pkg_fmri, excludes) - if da.attrs["type"] == "parent" - for f in da.attrlist("fmri") - ]) - - if not pkg_deps: - # no parent dependencies. - return True - self.__linked_pkgs.add(pkg_fmri) - - allowed = True - for f in pkg_deps: - fmri = f - if f.pkg_name == pkg.actions.depend.DEPEND_SELF: - # check if this package depends on itself. 
- fmri = pkg_fmri - if not self.__trim_nonmatching_parents1(pkg_fmri, fmri): - allowed = False - return allowed - - def __trim_nonmatching_origins(self, fmri, excludes, - exact_install=False, installed_dict_tmp=EmptyDict): - """Trim any fmri that contains a origin dependency that is - not satisfied by the current image or root-image""" - - for da in self.__get_dependency_actions(fmri, excludes): - if da.attrs["type"] != "origin": - continue - - req_fmri = pkg.fmri.PkgFmri(da.attrs["fmri"]) - - if da.attrs.get("root-image", "").lower() == "true": - if req_fmri.pkg_name.startswith( - "feature/firmware/"): - # this is a firmware dependency - fw_ok, reason = \ - self.__firmware.check_firmware(da, - req_fmri.pkg_name) - if not fw_ok: - self.__trim((fmri,), - _TRIM_FIRMWARE, reason) - return False - continue - - if self.__root_fmris is None: - img = pkg.client.image.Image( - misc.liveroot(), - allow_ondisk_upgrade=False, - user_provided_dir=True, - should_exist=True) - self.__root_fmris = dict([ - (f.pkg_name, f) - for f in img.gen_installed_pkgs() - ]) - - installed = self.__root_fmris.get( - req_fmri.pkg_name) - reason_id = _TRIM_INSTALLED_ROOT_ORIGIN - reason = (N_("Installed version in root image " - "is too old for origin " "dependency {0}"), - (req_fmri,)) - else: - # Always use the full installed dict for origin - # dependency. - if exact_install: - installed = installed_dict_tmp.get( - req_fmri.pkg_name) - else: - installed = self.__installed_dict.get( - req_fmri.pkg_name) - reason_id = _TRIM_INSTALLED_ORIGIN - reason = (N_("Installed version in image " - "being upgraded is too old for origin " - "dependency {0}"), (req_fmri,)) - - # assumption is that for root-image, publishers align; - # otherwise these sorts of cross-environment - # dependencies don't work well - - if (not installed or not req_fmri.version or - req_fmri.version == installed.version or - installed.version.is_successor(req_fmri.version, - version.CONSTRAINT_NONE)): - continue - - self.__trim((fmri,), reason_id, reason) - - return False - return True + installed = self.__installed_dict.get(req_fmri.pkg_name) + reason_id = _TRIM_INSTALLED_ORIGIN + reason = ( + N_( + "Installed version in image " + "being upgraded is too old for origin " + "dependency {0}" + ), + (req_fmri,), + ) - def __trim_unsupported(self, fmri): - """Indicate given package FMRI is unsupported.""" - self.__trim((fmri,), _TRIM_UNSUPPORTED, - N_("Package contains invalid or unsupported actions")) + # assumption is that for root-image, publishers align; + # otherwise these sorts of cross-environment + # dependencies don't work well - def __get_older_incorp_pkgs(self, fmri, install_holds, excludes=EmptyI, - relax_all=False, depth=0): - """Get all incorporated pkgs for the given 'fmri' whose versions - are older than what is currently installed in the image.""" + if ( + not installed + or not req_fmri.version + or req_fmri.version == installed.version + or installed.version.is_successor( + req_fmri.version, version.CONSTRAINT_NONE + ) + ): + continue + + self.__trim((fmri,), reason_id, reason) + + return False + return True + + def __trim_unsupported(self, fmri): + """Indicate given package FMRI is unsupported.""" + self.__trim( + (fmri,), + _TRIM_UNSUPPORTED, + N_("Package contains invalid or unsupported actions"), + ) + + def __get_older_incorp_pkgs( + self, fmri, install_holds, excludes=EmptyI, relax_all=False, depth=0 + ): + """Get all incorporated pkgs for the given 'fmri' whose versions + are older than what is currently installed in the image.""" + + 
candidates = set() + if fmri in self.__dg_incorp_cache: + candidates |= self.__dg_incorp_cache[fmri] + return candidates + + if depth > 10: + # Safeguard against circular dependencies. + # If it happens, just end the recursion tree. + return candidates + + self.__dg_incorp_cache[fmri] = set() + self.__progress() + + # Get all matching incorporated packages for this fmri; this is + # a list of sets, where each set represents all of the fmris + # matching the incorporate dependency for a single package stem. + # + # Only add potential FMRIs to the list of allowed downgrades if + # the currently installed version is not allowed by the related + # incorporate dependency. This prevents two undesirable + # behaviours: + # + # - downgrades when a package is no longer incorporated in + # a newer version of an incorporating package and an older + # version is otherwise allowed + # - upgrades of packages that are no longer incorporated + # in a newer version of an incorporating package and a newer + # version is otherwise allowed + for matchdg, nonmatchdg in six.itervalues( + self.__get_incorp_nonmatch_dict(fmri, excludes) + ): + match = next(iter(matchdg), None) + if not match or match.pkg_name not in self.__installed_dict: + continue + + inst_fmri = self.__installed_dict[match.pkg_name] + if inst_fmri in matchdg: + continue + + inst_ver = inst_fmri.version + for df in matchdg: + if df.version == inst_ver: + # If installed version is not changing, + # there is no need to check for + # downgraded incorporate deps. + continue + + is_successor = df.version.is_successor(inst_ver, None) + if relax_all and is_successor: + # If all install-holds are relaxed, and + # this package is being upgraded, it is + # not a downgrade candidate and there is + # no need to recursively check for + # downgraded incorporate deps here as + # will be checked directly later in + # solve_update_all. + continue + + # Do not allow implicit publisher switches. + if df.publisher != fmri.publisher: + continue + + # Do not allow pkgs marked for removal. + if fmri in self.__removal_fmris: + continue + + # Do not allow pkgs with install-holds but filter out + # child holds + install_hold = False + for ha in [ + sa + for sa in self.__get_actions(df, "set") + if sa.attrs["name"] == "pkg.depend.install-hold" + ]: + install_hold = True + for h in install_holds: + if ha.attrs["value"].startswith(h): + # This is a child hold + # of an incorporating + # pkg, ignore. + install_hold = False + break + if not install_hold: + break + if install_hold: + continue + + if not is_successor: + self.__dg_incorp_cache[fmri].add(df) + candidates.add(df) + + if not relax_all: + # If all install-holds are not relaxed, + # then we need to check if pkg has + # incorporate deps of its own since not + # every package is being checked + # individually. + candidates |= self.__get_older_incorp_pkgs( + df, + install_holds, + excludes=excludes, + relax_all=relax_all, + depth=depth + 1, + ) - candidates = set() - if fmri in self.__dg_incorp_cache: - candidates |= self.__dg_incorp_cache[fmri] - return candidates + return candidates - if depth > 10: - # Safeguard against circular dependencies. - # If it happens, just end the recursion tree. 
- return candidates + def __allow_incorp_downgrades(self, fmri, excludes=EmptyI, relax_all=False): + """Find packages which have lower versions than installed but + are incorporated by a package in the proposed list.""" - self.__dg_incorp_cache[fmri] = set() - self.__progress() + install_holds = set( + [ + sa.attrs["value"] + for sa in self.__get_actions(fmri, "set") + if sa.attrs["name"] == "pkg.depend.install-hold" + ] + ) - # Get all matching incorporated packages for this fmri; this is - # a list of sets, where each set represents all of the fmris - # matching the incorporate dependency for a single package stem. - # - # Only add potential FMRIs to the list of allowed downgrades if - # the currently installed version is not allowed by the related - # incorporate dependency. This prevents two undesirable - # behaviours: - # - # - downgrades when a package is no longer incorporated in - # a newer version of an incorporating package and an older - # version is otherwise allowed - # - upgrades of packages that are no longer incorporated - # in a newer version of an incorporating package and a newer - # version is otherwise allowed - for matchdg, nonmatchdg in six.itervalues(self.__get_incorp_nonmatch_dict(fmri, - excludes)): - match = next(iter(matchdg), None) - if (not match or - match.pkg_name not in self.__installed_dict): - continue - - inst_fmri = self.__installed_dict[match.pkg_name] - if inst_fmri in matchdg: - continue - - inst_ver = inst_fmri.version - for df in matchdg: - if df.version == inst_ver: - # If installed version is not changing, - # there is no need to check for - # downgraded incorporate deps. - continue - - is_successor = df.version.is_successor(inst_ver, - None) - if relax_all and is_successor: - # If all install-holds are relaxed, and - # this package is being upgraded, it is - # not a downgrade candidate and there is - # no need to recursively check for - # downgraded incorporate deps here as - # will be checked directly later in - # solve_update_all. - continue - - # Do not allow implicit publisher switches. - if df.publisher != fmri.publisher: - continue - - # Do not allow pkgs marked for removal. - if fmri in self.__removal_fmris: - continue - - # Do not allow pkgs with install-holds but filter out - # child holds - install_hold = False - for ha in [ - sa - for sa in self.__get_actions(df, "set") - if sa.attrs["name"] == - "pkg.depend.install-hold" - ]: - install_hold = True - for h in install_holds: - if ha.attrs["value"].startswith( - h): - # This is a child hold - # of an incorporating - # pkg, ignore. - install_hold = False - break - if not install_hold: - break - if install_hold: - continue - - if not is_successor: - self.__dg_incorp_cache[fmri].add(df) - candidates.add(df) - - if not relax_all: - # If all install-holds are not relaxed, - # then we need to check if pkg has - # incorporate deps of its own since not - # every package is being checked - # individually. - candidates |= \ - self.__get_older_incorp_pkgs(df, - install_holds, - excludes=excludes, - relax_all=relax_all, - depth=depth + 1) - - return candidates - - def __allow_incorp_downgrades(self, fmri, excludes=EmptyI, - relax_all=False): - """Find packages which have lower versions than installed but - are incorporated by a package in the proposed list.""" - - install_holds = set([ - sa.attrs["value"] - for sa in self.__get_actions(fmri, "set") - if sa.attrs["name"] == "pkg.depend.install-hold" - ]) + # Get all pkgs which are incorporated by 'fmri', + # including nested incorps. 
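The call just below walks incorporate dependencies recursively, reusing a per-fmri cache and giving up past a fixed depth so that circular incorporations cannot loop forever, as the safeguard comments above note. A minimal sketch of that pattern, with hypothetical names (get_children stands in for the real incorporate lookup; only the cache-plus-depth-cap shape is taken from the code above):

    def collect(node, get_children, cache, depth=0, max_depth=10):
        # Reuse an earlier result for this node if we have one.
        if node in cache:
            return cache[node]
        # Safeguard against circular dependencies: stop descending.
        if depth > max_depth:
            return set()
        found = set()
        cache[node] = found  # publish before recursing to break cycles
        for child in get_children(node):
            found.add(child)
            found |= collect(child, get_children, cache, depth + 1, max_depth)
        return found

    deps = {"incorp": ["pkgA", "pkgB"], "pkgA": ["pkgC"], "pkgC": ["incorp"]}
    print(sorted(collect("incorp", lambda n: deps.get(n, []), {})))
    # ['incorp', 'pkgA', 'pkgB', 'pkgC']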
+ candidates = self.__get_older_incorp_pkgs( + fmri, install_holds, excludes=excludes, relax_all=relax_all + ) - # Get all pkgs which are incorporated by 'fmri', - # including nested incorps. - candidates = self.__get_older_incorp_pkgs(fmri, install_holds, - excludes=excludes, relax_all=relax_all) + return candidates - return candidates + def __dotrim(self, fmri_list): + """Return fmri_list trimmed of any fmris in self.__trim_dict""" - def __dotrim(self, fmri_list): - """Return fmri_list trimmed of any fmris in self.__trim_dict""" + return [f for f in fmri_list if not self.__trim_dict.get(f)] - return [ - f - for f in fmri_list - if not self.__trim_dict.get(f) - ] + def __is_child(self): + """Return True if this image is a linked image child.""" + return self.__parent_pkgs is not None - def __is_child(self): - """Return True if this image is a linked image child.""" - return self.__parent_pkgs is not None + def __is_zone(self): + """Return True if image is a nonglobal zone""" + if "variant.opensolaris.zone" in self.__variants: + return self.__variants["variant.opensolaris.zone"] == "nonglobal" + else: + return False - def __is_zone(self): - """Return True if image is a nonglobal zone""" - if 'variant.opensolaris.zone' in self.__variants: - return self.__variants['variant.opensolaris.zone'] == \ - 'nonglobal' - else: - return False # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/pkgdefs.py b/src/modules/client/pkgdefs.py index 072918384..8f31d8f78 100644 --- a/src/modules/client/pkgdefs.py +++ b/src/modules/client/pkgdefs.py @@ -30,130 +30,136 @@ """ # pkg exit codes -EXIT_OK = 0 # Command succeeded. -EXIT_OOPS = 1 # An error occurred. -EXIT_BADOPT = 2 # Invalid command line options were specified. -EXIT_PARTIAL = 3 # Multiple ops were requested, but not all succeeded. -EXIT_NOP = 4 # No changes were made - nothing to do. -EXIT_NOTLIVE = 5 # The requested op cannot be performed on a live image. -EXIT_LICENSE = 6 # License acceptance required for requested op. -EXIT_LOCKED = 7 # Image is currently locked by another process -EXIT_ACTUATOR = 8 # Actuator timed out -EXIT_CONSTRAINED = 9 # Overly constrained +EXIT_OK = 0 # Command succeeded. +EXIT_OOPS = 1 # An error occurred. +EXIT_BADOPT = 2 # Invalid command line options were specified. +EXIT_PARTIAL = 3 # Multiple ops were requested, but not all succeeded. +EXIT_NOP = 4 # No changes were made - nothing to do. +EXIT_NOTLIVE = 5 # The requested op cannot be performed on a live image. +EXIT_LICENSE = 6 # License acceptance required for requested op. 
+EXIT_LOCKED = 7 # Image is currently locked by another process +EXIT_ACTUATOR = 8 # Actuator timed out +EXIT_CONSTRAINED = 9 # Overly constrained # private pkg exit codes -EXIT_EACCESS = 51 # Can't access requested image -EXIT_DIVERGED = 52 # Image is not in sync with its constraints -EXIT_NOPARENT = 53 # Image is not linked to a parent image -EXIT_PARENTOP = 54 # Linked operation must be done from parent +EXIT_EACCESS = 51 # Can't access requested image +EXIT_DIVERGED = 52 # Image is not in sync with its constraints +EXIT_NOPARENT = 53 # Image is not linked to a parent image +EXIT_PARENTOP = 54 # Linked operation must be done from parent # package operations -PKG_OP_ATTACH = "attach-linked" -PKG_OP_AUDIT_LINKED = "audit-linked" -PKG_OP_CHANGE_FACET = "change-facet" -PKG_OP_CHANGE_VARIANT = "change-variant" -PKG_OP_DEHYDRATE = "dehydrate" -PKG_OP_DETACH = "detach-linked" -PKG_OP_EXACT_INSTALL = "exact-install" -PKG_OP_FIX = "fix" -PKG_OP_FLAG = "flag" -PKG_OP_INFO = "info" -PKG_OP_INSTALL = "install" -PKG_OP_LIST = "list" -PKG_OP_LIST_LINKED = "list-linked" -PKG_OP_PROP_LINKED = "property-linked" -PKG_OP_PUBCHECK = "pubcheck-linked" -PKG_OP_PUBLISHER_LIST = "publisher" -PKG_OP_REHYDRATE = "rehydrate" -PKG_OP_REVERT = "revert" -PKG_OP_SET_MEDIATOR = "set-mediator" -PKG_OP_SET_PUBLISHER = "set-publisher" +PKG_OP_ATTACH = "attach-linked" +PKG_OP_AUDIT_LINKED = "audit-linked" +PKG_OP_CHANGE_FACET = "change-facet" +PKG_OP_CHANGE_VARIANT = "change-variant" +PKG_OP_DEHYDRATE = "dehydrate" +PKG_OP_DETACH = "detach-linked" +PKG_OP_EXACT_INSTALL = "exact-install" +PKG_OP_FIX = "fix" +PKG_OP_FLAG = "flag" +PKG_OP_INFO = "info" +PKG_OP_INSTALL = "install" +PKG_OP_LIST = "list" +PKG_OP_LIST_LINKED = "list-linked" +PKG_OP_PROP_LINKED = "property-linked" +PKG_OP_PUBCHECK = "pubcheck-linked" +PKG_OP_PUBLISHER_LIST = "publisher" +PKG_OP_REHYDRATE = "rehydrate" +PKG_OP_REVERT = "revert" +PKG_OP_SET_MEDIATOR = "set-mediator" +PKG_OP_SET_PUBLISHER = "set-publisher" PKG_OP_SET_PROP_LINKED = "set-property-linked" -PKG_OP_SYNC = "sync-linked" -PKG_OP_UNINSTALL = "uninstall" +PKG_OP_SYNC = "sync-linked" +PKG_OP_UNINSTALL = "uninstall" PKG_OP_UNSET_PUBLISHER = "unset-publisher" -PKG_OP_UPDATE = "update" -PKG_OP_APPLY_HOT_FIX = "apply-hot-fix" -PKG_OP_AUTOREMOVE = "autoremove" -PKG_OP_HOTFIX_CLEANUP = "clean-up-hot-fix" -PKG_OP_VERIFY = "verify" -pkg_op_values = frozenset([ - PKG_OP_ATTACH, - PKG_OP_AUDIT_LINKED, - PKG_OP_CHANGE_FACET, - PKG_OP_CHANGE_VARIANT, - PKG_OP_DEHYDRATE, - PKG_OP_DETACH, - PKG_OP_EXACT_INSTALL, - PKG_OP_FIX, - PKG_OP_FLAG, - PKG_OP_INFO, - PKG_OP_INSTALL, - PKG_OP_LIST, - PKG_OP_LIST_LINKED, - PKG_OP_PROP_LINKED, - PKG_OP_PUBCHECK, - PKG_OP_PUBLISHER_LIST, - PKG_OP_REVERT, - PKG_OP_REHYDRATE, - PKG_OP_SET_MEDIATOR, - PKG_OP_SET_PUBLISHER, - PKG_OP_SET_PROP_LINKED, - PKG_OP_SYNC, - PKG_OP_UNINSTALL, - PKG_OP_UNSET_PUBLISHER, - PKG_OP_UPDATE, - PKG_OP_APPLY_HOT_FIX, - PKG_OP_AUTOREMOVE, - PKG_OP_HOTFIX_CLEANUP, - PKG_OP_VERIFY, -]) - -API_OP_ATTACH = "attach-linked" -API_OP_CHANGE_FACET = "change-facet" +PKG_OP_UPDATE = "update" +PKG_OP_APPLY_HOT_FIX = "apply-hot-fix" +PKG_OP_AUTOREMOVE = "autoremove" +PKG_OP_HOTFIX_CLEANUP = "clean-up-hot-fix" +PKG_OP_VERIFY = "verify" +pkg_op_values = frozenset( + [ + PKG_OP_ATTACH, + PKG_OP_AUDIT_LINKED, + PKG_OP_CHANGE_FACET, + PKG_OP_CHANGE_VARIANT, + PKG_OP_DEHYDRATE, + PKG_OP_DETACH, + PKG_OP_EXACT_INSTALL, + PKG_OP_FIX, + PKG_OP_FLAG, + PKG_OP_INFO, + PKG_OP_INSTALL, + PKG_OP_LIST, + PKG_OP_LIST_LINKED, + PKG_OP_PROP_LINKED, + PKG_OP_PUBCHECK, + 
PKG_OP_PUBLISHER_LIST, + PKG_OP_REVERT, + PKG_OP_REHYDRATE, + PKG_OP_SET_MEDIATOR, + PKG_OP_SET_PUBLISHER, + PKG_OP_SET_PROP_LINKED, + PKG_OP_SYNC, + PKG_OP_UNINSTALL, + PKG_OP_UNSET_PUBLISHER, + PKG_OP_UPDATE, + PKG_OP_APPLY_HOT_FIX, + PKG_OP_AUTOREMOVE, + PKG_OP_HOTFIX_CLEANUP, + PKG_OP_VERIFY, + ] +) + +API_OP_ATTACH = "attach-linked" +API_OP_CHANGE_FACET = "change-facet" API_OP_CHANGE_VARIANT = "change-variant" -API_OP_DEHYDRATE = "dehydrate" -API_OP_DETACH = "detach-linked" -API_OP_EXACT_INSTALL = "exact-install" -API_OP_FIX = "fix" -API_OP_INSTALL = "install" -API_OP_REHYDRATE = "rehydrate" -API_OP_REPAIR = "repair" -API_OP_REVERT = "revert" -API_OP_SET_MEDIATOR = "set-mediator" -API_OP_SYNC = "sync-linked" -API_OP_UNINSTALL = "uninstall" -API_OP_UPDATE = "update" -API_OP_VERIFY = "verify" -api_op_values = frozenset([ - API_OP_ATTACH, - API_OP_CHANGE_FACET, - API_OP_CHANGE_VARIANT, - API_OP_DETACH, - API_OP_DEHYDRATE, - API_OP_EXACT_INSTALL, - API_OP_FIX, - API_OP_INSTALL, - API_OP_REHYDRATE, - API_OP_REPAIR, - API_OP_REVERT, - API_OP_SET_MEDIATOR, - API_OP_SYNC, - API_OP_UNINSTALL, - API_OP_UPDATE, - API_OP_VERIFY -]) - -API_STAGE_DEFAULT = "default" -API_STAGE_PLAN = "plan" -API_STAGE_PREPARE = "prepare" -API_STAGE_EXECUTE = "execute" -api_stage_values = frozenset([ - API_STAGE_DEFAULT, - API_STAGE_PLAN, - API_STAGE_PREPARE, - API_STAGE_EXECUTE, -]) +API_OP_DEHYDRATE = "dehydrate" +API_OP_DETACH = "detach-linked" +API_OP_EXACT_INSTALL = "exact-install" +API_OP_FIX = "fix" +API_OP_INSTALL = "install" +API_OP_REHYDRATE = "rehydrate" +API_OP_REPAIR = "repair" +API_OP_REVERT = "revert" +API_OP_SET_MEDIATOR = "set-mediator" +API_OP_SYNC = "sync-linked" +API_OP_UNINSTALL = "uninstall" +API_OP_UPDATE = "update" +API_OP_VERIFY = "verify" +api_op_values = frozenset( + [ + API_OP_ATTACH, + API_OP_CHANGE_FACET, + API_OP_CHANGE_VARIANT, + API_OP_DETACH, + API_OP_DEHYDRATE, + API_OP_EXACT_INSTALL, + API_OP_FIX, + API_OP_INSTALL, + API_OP_REHYDRATE, + API_OP_REPAIR, + API_OP_REVERT, + API_OP_SET_MEDIATOR, + API_OP_SYNC, + API_OP_UNINSTALL, + API_OP_UPDATE, + API_OP_VERIFY, + ] +) + +API_STAGE_DEFAULT = "default" +API_STAGE_PLAN = "plan" +API_STAGE_PREPARE = "prepare" +API_STAGE_EXECUTE = "execute" +api_stage_values = frozenset( + [ + API_STAGE_DEFAULT, + API_STAGE_PLAN, + API_STAGE_PREPARE, + API_STAGE_EXECUTE, + ] +) # # Please note that the values of these PKG_STATE constants should not @@ -190,8 +196,8 @@ # These states are used to indicate why a package was rejected and # is not available for packaging operations. -PKG_STATE_UNSUPPORTED = 10 # Package contains invalid or - # unsupported metadata. +PKG_STATE_UNSUPPORTED = 10 # Package contains invalid or +# unsupported metadata. # This state indicates that this package is frozen. PKG_STATE_FROZEN = 11 @@ -223,4 +229,4 @@ MSG_UNPACKAGED = "unpackaged" # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/pkgplan.py b/src/modules/client/pkgplan.py index 9047e8504..6b710886f 100644 --- a/src/modules/client/pkgplan.py +++ b/src/modules/client/pkgplan.py @@ -47,678 +47,714 @@ logger = global_settings.logger -class PkgPlan(object): - """A package plan takes two package FMRIs and an Image, and produces the - set of actions required to take the Image from the origin FMRI to the - destination FMRI. - - If the destination FMRI is None, the package is removed. 
- """ - __slots__ = [ +class PkgPlan(object): + """A package plan takes two package FMRIs and an Image, and produces the + set of actions required to take the Image from the origin FMRI to the + destination FMRI. + + If the destination FMRI is None, the package is removed. + """ + + __slots__ = [ + "__destination_mfst", + "_executed", + "_license_status", + "__origin_mfst", + "__repair_actions", + "__salvage_actions", + "__xferfiles", + "__xfersize", + "_autofix_pkgs", + "_hash", + "actions", + "destination_fmri", + "image", + "origin_fmri", + "pkg_summary", + ] + + # + # we don't serialize __xferfiles or __xfersize since those should be + # recalculated after after a plan is loaded (since the contents of the + # download cache may have changed). + # + # we don't serialize __origin_mfst, __destination_mfst, or + # __repair_actions since we only support serializing pkgplans which + # have had their actions evaluated and merged, and when action + # evaluation is complete these fields are cleared. + # + # we don't serialize our image object pointer. that has to be reset + # after this object is reloaded. + # + __state__noserialize = frozenset( + [ "__destination_mfst", - "_executed", - "_license_status", "__origin_mfst", "__repair_actions", "__salvage_actions", "__xferfiles", "__xfersize", - "_autofix_pkgs", - "_hash", - "actions", - "destination_fmri", "image", - "origin_fmri", - "pkg_summary", ] + ) + + # make sure all __state__noserialize values are valid + assert (__state__noserialize - set(__slots__)) == set() + # figure out which state we are saving. + __state__serialize = set(__slots__) - __state__noserialize + + # describe our state and the types of all objects + __state__desc = { + "_autofix_pkgs": [pkg.fmri.PkgFmri], + "_license_status": { + six.string_types[0]: { + "src": pkg.actions.generic.NSG, + "dest": pkg.actions.generic.NSG, + }, + }, + "actions": pkg.manifest.ManifestDifference, + "destination_fmri": pkg.fmri.PkgFmri, + "origin_fmri": pkg.fmri.PkgFmri, + } + + __state__commonize = frozenset( + [ + pkg.fmri.PkgFmri, + ] + ) + + def __init__(self, image=None): + self.destination_fmri = None + self.__destination_mfst = manifest.NullFactoredManifest + + self.origin_fmri = None + self.__origin_mfst = manifest.NullFactoredManifest + + self.actions = manifest.ManifestDifference([], [], []) + self.image = image + self.pkg_summary = None + + self._executed = False + self._license_status = {} + self.__repair_actions = {} + self.__salvage_actions = {} + self.__xferfiles = -1 + self.__xfersize = -1 + self._autofix_pkgs = [] + self._hash = None + + @staticmethod + def getstate(obj, je_state=None): + """Returns the serialized state of this object in a format + that that can be easily stored using JSON, pickle, etc.""" + + # validate unserialized state + # (see comments above __state__noserialize) + assert obj.__origin_mfst == manifest.NullFactoredManifest + assert obj.__destination_mfst == manifest.NullFactoredManifest + assert obj.__repair_actions == {} + + # we use __slots__, so create a state dictionary + state = {} + for k in obj.__state__serialize: + state[k] = getattr(obj, k) + + return pkg.misc.json_encode( + PkgPlan.__name__, + state, + PkgPlan.__state__desc, + commonize=PkgPlan.__state__commonize, + je_state=je_state, + ) + + @staticmethod + def setstate(obj, state, jd_state=None): + """Update the state of this object using previously serialized + state obtained via getstate().""" + + # get the name of the object we're dealing with + name = type(obj).__name__ + + # decode 
serialized state into python objects + state = pkg.misc.json_decode( + name, + state, + PkgPlan.__state__desc, + commonize=PkgPlan.__state__commonize, + jd_state=jd_state, + ) + + # we use __slots__, so directly update attributes + for k in state: + setattr(obj, k, state[k]) + + # update unserialized state + # (see comments above __state__noserialize) + obj.__origin_mfst = manifest.NullFactoredManifest + obj.__destination_mfst = manifest.NullFactoredManifest + obj.__repair_actions = {} + obj.__xferfiles = -1 + obj.__xfersize = -1 + obj.image = None + + @staticmethod + def fromstate(state, jd_state=None): + """Allocate a new object using previously serialized state + obtained via getstate().""" + rv = PkgPlan() + PkgPlan.setstate(rv, state, jd_state) + return rv + + def __str__(self): + s = "{0} -> {1}\n".format(self.origin_fmri, self.destination_fmri) + for src, dest in itertools.chain(*self.actions): + s += " {0} -> {1}\n".format(src, dest) + return s + + def __add_license(self, src, dest): + """Adds a license status entry for the given src and dest + license actions. + + 'src' should be None or the source action for a license. + + 'dest' must be the destination action for a license.""" + + self._license_status[dest.attrs["license"]] = { + "src": src, + "dest": dest, + "accepted": False, + "displayed": False, + } + + def propose(self, of, om, df, dm): + """Propose origin and dest fmri, manifest""" + self.origin_fmri = of + self.__origin_mfst = om + self.destination_fmri = df + self.__destination_mfst = dm + + def __get_orig_act(self, dest): + """Generate the on-disk state (attributes) of the action + that fail verification.""" + + if not dest.has_payload or "path" not in dest.attrs: + return + + path = os.path.join(self.image.root, dest.attrs["path"]) + try: + pstat = os.lstat(path) + except Exception: + # If file to repair isn't on-disk, treat as install + return + + act = pkg.actions.fromstr(str(dest)) + act.attrs["mode"] = oct(stat.S_IMODE(pstat.st_mode)) + try: + owner = pwd.getpwuid(pstat.st_uid).pw_name + group = grp.getgrgid(pstat.st_gid).gr_name + except KeyError: + # If associated user / group can't be determined, treat + # as install. This is not optimal for repairs, but + # ensures proper ownership of file is set. + return + act.attrs["owner"] = owner + act.attrs["group"] = group + + # No need to generate hash of on-disk content as verify + # short-circuits hash comparison by setting replace_required + # flag on action. The same is true for preserved files which + # will automatically handle content replacement if needed based + # on the result of _check_preserve. + return act + + def propose_repair(self, fmri, mfst, install, remove, autofix=False): + self.propose(fmri, mfst, fmri, mfst) + # self.origin_fmri = None + # I'd like a cleaner solution than this; we need to actually + # construct a list of actions as things currently are rather + # than just re-applying the current set of actions. # - # we don't serialize __xferfiles or __xfersize since those should be - # recalculated after after a plan is loaded (since the contents of the - # download cache may have changed). + # Create a list of (src, dst) pairs for the actions to send to + # execute_repair. + + if autofix: + # If an uninstall causes a fixup to happen, we can't + # generate an on-disk state action because the result + # of needsdata is different between propose and execute. + # Therefore, we explicitly assign None to src for actions + # to be installed. 
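The dictionary built just below records each repair as a (src, dest) pair, and the same convention is used throughout the class: a dest of None marks a removal, a src of None an initial install, and both present an update. A compact, hypothetical illustration of that dispatch (the strings stand in for real actions):

    import itertools

    # Mirrors the three lists a plan carries: added, changed, removed.
    added = [(None, "file etc/new.conf")]
    changed = [("file etc/app.conf", "file etc/app.conf (new hash)")]
    removed = [("file etc/stale.conf", None)]

    for src, dest in itertools.chain(added, changed, removed):
        if dest is None:
            print("remove :", src)
        elif src is None:
            print("install:", dest)
        else:
            print("update :", src, "->", dest)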
+ self.__repair_actions = { + # src is none for repairs + "install": [(None, x) for x in install], + # dest is none for removals. + "remove": [(x, None) for x in remove], + } + self._autofix_pkgs.append(fmri) + else: + self.__repair_actions = { + # src can be None or an action representing on-disk state + "install": [(self.__get_orig_act(x), x) for x in install], + "remove": [(x, None) for x in remove], + } + + def get_actions(self): + raise NotImplementedError() + + def get_nactions(self): + return ( + len(self.actions.added) + + len(self.actions.changed) + + len(self.actions.removed) + ) + + def update_pkg_set(self, fmri_set): + """updates a set of installed fmris to reflect + proposed new state""" + + if self.origin_fmri: + fmri_set.discard(self.origin_fmri) + + if self.destination_fmri: + fmri_set.add(self.destination_fmri) + + def evaluate( + self, old_excludes=EmptyI, new_excludes=EmptyI, can_exclude=False + ): + """Determine the actions required to transition the package.""" + + # If new actions are being installed, check the destination + # manifest for signatures. + if self.destination_fmri is not None: + try: + dest_pub = self.image.get_publisher( + prefix=self.destination_fmri.publisher + ) + except apx.UnknownPublisher: + # Since user removed publisher, assume this is + # the same as if they had set signature-policy + # ignore for the publisher. + sig_pol = None + else: + sig_pol = self.image.signature_policy.combine( + dest_pub.signature_policy + ) + + if self.destination_fmri in self._autofix_pkgs: + # Repaired packages use a manifest synthesized + # from the installed one; so retrieve the + # installed one for our signature checks. + sigman = self.image.get_manifest( + self.destination_fmri, ignore_excludes=True + ) + else: + sigman = self.__destination_mfst + + sigs = list( + sigman.gen_actions_by_type("signature", excludes=new_excludes) + ) + if sig_pol and (sigs or sig_pol.name != "ignore"): + # Only perform signature verification logic if + # there are signatures or if signature-policy + # is not 'ignore'. + + try: + sig_pol.process_signatures( + sigs, + sigman.gen_actions(), + dest_pub, + self.image.trust_anchors, + self.image.cfg.get_policy( + "check-certificate-revocation" + ), + ) + except apx.SigningException as e: + e.pfmri = self.destination_fmri + if isinstance(e, apx.BrokenChain): + e.ext_exs.extend(self.image.bad_trust_anchors) + raise + if can_exclude: + if self.__destination_mfst is not None: + self.__destination_mfst.exclude_content(new_excludes) + if ( + self.__origin_mfst is not None + and self.__destination_mfst != self.__origin_mfst + ): + self.__origin_mfst.exclude_content(old_excludes) + old_excludes = EmptyI + new_excludes = EmptyI + + self.actions = self.__destination_mfst.difference( + self.__origin_mfst, old_excludes, new_excludes, pkgplan=self + ) + + # figure out how many implicit directories disappear in this + # transition and add directory remove actions. These won't + # do anything unless no pkgs reference that directory in + # new state.... + + # Retrieving origin_dirs first and then checking it for any + # entries allows avoiding an unnecessary expanddirs for the + # destination manifest when it isn't needed. + origin_dirs = expanddirs( + self.__origin_mfst.get_directories(old_excludes) + ) + + # Manifest.get_directories() returns implicit directories, which + # means that this computation ends up re-adding all the explicit + # directories getting removed to the removed list. This is + # ugly, but safe. 
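The comment above describes set arithmetic over expanded directory paths: every directory implied by the origin manifest, minus every directory implied by the destination, is a directory that disappears in this transition. A rough sketch of that computation, with a simplified stand-in for the expanddirs helper used here:

    import os

    def expand_dirs(paths):
        # Return every directory implied by the given paths, parents included.
        out = set()
        for p in paths:
            while p and p != "/":
                out.add(p)
                p = os.path.dirname(p)
        return out

    origin_dirs = expand_dirs({"/var/user/myuser", "/usr/bin"})
    dest_dirs = expand_dirs({"/usr/bin"})

    # Directories no longer referenced after the transition; these are the
    # candidates for implicit directory-remove actions.
    absent_dirs = origin_dirs - dest_dirs
    print(sorted(absent_dirs))  # ['/var', '/var/user', '/var/user/myuser']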
+ if origin_dirs: + absent_dirs = origin_dirs - expanddirs( + self.__destination_mfst.get_directories(new_excludes) + ) + + for a in absent_dirs: + self.actions.removed.append( + (directory.DirectoryAction(path=a, implicit="True"), None) + ) + + # Stash information needed by legacy actions. + self.pkg_summary = self.__destination_mfst.get( + "pkg.summary", + self.__destination_mfst.get("description", "none provided"), + ) + + # Add any install repair actions to the update list + self.actions.changed.extend( + self.__repair_actions.get("install", EmptyI) + ) + self.actions.removed.extend(self.__repair_actions.get("remove", EmptyI)) + + # No longer needed. + self.__repair_actions = {} + + for src, dest in itertools.chain( + self.gen_update_actions(), self.gen_install_actions() + ): + if dest.name == "license": + self.__add_license(src, dest) + if not src: + # Initial installs require acceptance. + continue + src_ma = src.attrs.get("must-accept", False) + dest_ma = dest.attrs.get("must-accept", False) + if (dest_ma and src_ma) and src.hash == dest.hash: + # If src action required acceptance, + # then license was already accepted + # before, and if the hashes are the + # same for the license payload, then + # it doesn't need to be accepted again. + self.set_license_status( + dest.attrs["license"], accepted=True + ) + + # Keep a cache of dir actions with salvage-from attrs. + # Since directories are installed in top-down order, + # we need this list to make sure we salvage contents + # as accurately as possible. For example, where: + # + # /var/user gets salvaged to /var/.migrate/user + # and + # /var/user/myuser/.ssh to /var/.migrate/user/myuser/.ssh + # + # We must ensure that we don't try to salvage + # var/user/myuser/.ssh when installing + # /var/.migrate/user, + # but instead wait till /var/.migrate/user/myuser/.ssh + # is being installed, otherwise that content will + # the salvaged to the wrong place. + if dest.name == "dir" and "salvage-from" in dest.attrs: + for p in dest.attrlist("salvage-from"): + self.__salvage_actions[p] = dest + + def get_licenses(self): + """A generator function that yields tuples of the form (license, + entry). Where 'entry' is a dict containing the license status + information.""" + + for lic, entry in six.iteritems(self._license_status): + yield lic, entry + + def set_license_status(self, plicense, accepted=None, displayed=None): + """Sets the license status for the given license entry. + + 'plicense' should be the value of the license attribute for the + destination license action. 
+ + 'accepted' is an optional parameter that can be one of three + values: + None leaves accepted status unchanged + False sets accepted status to False + True sets accepted status to True + + 'displayed' is an optional parameter that can be one of three + values: + None leaves displayed status unchanged + False sets displayed status to False + True sets displayed status to True""" + + entry = self._license_status[plicense] + if accepted is not None: + entry["accepted"] = accepted + if displayed is not None: + entry["displayed"] = displayed + + def get_xferstats(self): + if self.__xfersize != -1: + return (self.__xferfiles, self.__xfersize) + + self.__xfersize = 0 + self.__xferfiles = 0 + for src, dest in itertools.chain(*self.actions): + if dest and dest.needsdata(src, self): + self.__xfersize += get_pkg_otw_size(dest) + self.__xferfiles += 1 + if dest.name == "signature": + self.__xfersize += dest.get_action_chain_csize() + self.__xferfiles += len(dest.attrs.get("chain", "").split()) + + return (self.__xferfiles, self.__xfersize) + + def get_bytes_added(self): + """Return tuple of compressed bytes possibly downloaded + and number of bytes laid down; ignore removals + because they're usually pinned by snapshots""" + + def sum_dest_size(a, b): + if b[1]: + return ( + a[0] + int(b[1].attrs.get("pkg.csize", 0)), + a[1] + int(b[1].attrs.get("pkg.size", 0)), + ) + return (a[0], a[1]) + + return reduce(sum_dest_size, itertools.chain(*self.actions), (0, 0)) + + def get_xferfmri(self): + if self.destination_fmri: + return self.destination_fmri + if self.origin_fmri: + return self.origin_fmri + return None + + def preexecute(self): + """Perform actions required prior to installation or removal of + a package. + + This method executes each action's preremove() or preinstall() + methods, as well as any package-wide steps that need to be taken + at such a time. + """ + + # Determine if license acceptance requirements have been met as + # early as possible. 
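The loop that follows collects every license whose must-accept or must-display requirement has not yet been met. A small sketch of that gate together with the tri-state status update described in set_license_status() above, using plain dictionaries instead of the real license actions:

    licenses = {
        "lic.EULA": {"must_accept": True, "must_display": False,
                     "accepted": False, "displayed": False},
    }

    def set_status(name, accepted=None, displayed=None):
        # None leaves a flag unchanged; True/False overwrite it.
        entry = licenses[name]
        if accepted is not None:
            entry["accepted"] = accepted
        if displayed is not None:
            entry["displayed"] = displayed

    def unmet(entries):
        # Licenses whose acceptance/display requirements are not satisfied.
        return [name for name, e in entries.items()
                if (e["must_accept"] and not e["accepted"])
                or (e["must_display"] and not e["displayed"])]

    print(unmet(licenses))             # ['lic.EULA']
    set_status("lic.EULA", accepted=True)
    print(unmet(licenses))             # []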
+ errors = [] + for lic, entry in self.get_licenses(): + dest = entry["dest"] + if (dest.must_accept and not entry["accepted"]) or ( + dest.must_display and not entry["displayed"] + ): + errors.append( + apx.LicenseAcceptanceError(self.destination_fmri, **entry) + ) + + if errors: + raise apx.PkgLicenseErrors(errors) + + for src, dest in itertools.chain(*self.actions): + if dest: + dest.preinstall(self, src) + else: + src.preremove(self) + + def download(self, progtrack, check_cancel): + """Download data for any actions that need it.""" + progtrack.download_start_pkg(self.get_xferfmri()) + mfile = self.image.transport.multi_file( + self.destination_fmri, progtrack, check_cancel + ) + + if mfile is None: + progtrack.download_end_pkg(self.get_xferfmri()) + return + + for src, dest in itertools.chain(*self.actions): + if dest and dest.needsdata(src, self): + mfile.add_action(dest) + + mfile.wait_files() + progtrack.download_end_pkg(self.get_xferfmri()) + + def cacheload(self): + """Load previously downloaded data for actions that need it.""" + + fmri = self.destination_fmri + for src, dest in itertools.chain(*self.actions): + if not dest or not dest.needsdata(src, self): + continue + dest.data = self.image.transport.action_cached(fmri, dest) + + def gen_install_actions(self): + for src, dest in self.actions.added: + yield src, dest + + def gen_removal_actions(self): + for src, dest in self.actions.removed: + yield src, dest + + def gen_update_actions(self): + for src, dest in self.actions.changed: + yield src, dest + + def execute_install(self, src, dest): + """perform action for installation of package""" + if DebugValues["actions"]: + print("execute_install: {} -> {}".format(src, dest)) + self._executed = True + try: + dest.install(self, src) + except (pkg.actions.ActionError, EnvironmentError): + # Don't log these as they're expected, and should be + # handled by the caller. + raise + except Exception as e: + logger.error( + "Action install failed for '{0}' ({1}):\n " + "{2}: {3}".format( + dest.attrs.get(dest.key_attr, id(dest)), + self.destination_fmri.get_pkg_stem(), + e.__class__.__name__, + e, + ) + ) + raise + + def execute_update(self, src, dest): + """handle action updates""" + if DebugValues["actions"]: + print("execute_update: {} -> {}".format(src, dest)) + self._executed = True + try: + dest.install(self, src) + except (pkg.actions.ActionError, EnvironmentError): + # Don't log these as they're expected, and should be + # handled by the caller. + raise + except Exception as e: + logger.error( + "Action upgrade failed for '{0}' ({1}):\n " + "{2}: {3}".format( + dest.attrs.get(dest.key_attr, id(dest)), + self.destination_fmri.get_pkg_stem(), + e.__class__.__name__, + e, + ) + ) + raise + + def execute_removal(self, src, dest): + """handle action removals""" + if DebugValues["actions"]: + print("execute_removal: {}".format(src)) + self._executed = True + try: + src.remove(self) + except (pkg.actions.ActionError, EnvironmentError): + # Don't log these as they're expected, and should be + # handled by the caller. + raise + except Exception as e: + logger.error( + "Action removal failed for '{0}' ({1}):\n " + "{2}: {3}".format( + src.attrs.get(src.key_attr, id(src)), + self.origin_fmri.get_pkg_stem(), + e.__class__.__name__, + e, + ) + ) + raise + + def execute_retry(self, src, dest): + """handle a retry operation""" + dest.retry(self, dest) + + def postexecute(self): + """Perform actions required after install or remove of a pkg. 
+ + This method executes each action's postremove() or postinstall() + methods, as well as any package-wide steps that need to be taken + at such a time. + """ + # record that package states are consistent + for src, dest in itertools.chain(*self.actions): + if dest: + dest.postinstall(self, src) + else: + src.postremove(self) + + def salvage(self, path): + """Used to save unexpected files or directories found during + plan execution. Salvaged items are tracked in the imageplan. + """ + + assert self._executed + spath = self.image.salvage(path) + # get just the file path that was salvaged + fpath = path.replace(os.path.normpath(self.image.get_root()), "", 1) + if fpath.startswith(os.path.sep): + fpath = fpath[1:] + self.image.imageplan.pd._salvaged.append((fpath, spath)) + + def salvage_from(self, local_path, full_destination): + """move unpackaged contents to specified destination""" + # remove leading / if present + if local_path.startswith(os.path.sep): + local_path = local_path[1:] + + # The salvaged locations known to us are a list of tuples of + # the form (old dir, lost+found salvage dir) and stored in + # self.image.imageplan.pd._salvaged[:] + # - # we don't serialize __origin_mfst, __destination_mfst, or - # __repair_actions since we only support serializing pkgplans which - # have had their actions evaluated and merged, and when action - # evaluation is complete these fields are cleared. + # Check if this salvage-from is also the best match for other + # possibly previously packaged subdirs of this directory. + # E.g. if we stop delivering /var/user/evsuser/.ssh, then the + # action that specifies 'salvage-from=var/user' ought to deal + # with its files. # - # we don't serialize our image object pointer. that has to be reset - # after this object is reloaded. + # On the other hand, if we package another directory, with + # 'salvage-from=/var/user/evsuser', then that should be used + # to salvage the .ssh content, not the action that salvages + # from /var/user. # - __state__noserialize = frozenset([ - "__destination_mfst", - "__origin_mfst", - "__repair_actions", - "__salvage_actions", - "__xferfiles", - "__xfersize", - "image", - ]) - - # make sure all __state__noserialize values are valid - assert (__state__noserialize - set(__slots__)) == set() - - # figure out which state we are saving. 
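The block being removed here, like its reformatted counterpart earlier in the hunk, splits the __slots__ into fields that are persisted and fields that must be reset or reattached after a plan is reloaded. A minimal sketch of that pattern with hypothetical field names, not the real PkgPlan slots:

    class Plan:
        __slots__ = ["origin", "destination", "image", "cached_size"]

        # Fields that are recomputed or reattached after a reload.
        _noserialize = frozenset(["image", "cached_size"])
        _serialize = frozenset(__slots__) - _noserialize

        def __init__(self, origin=None, destination=None, image=None):
            self.origin = origin
            self.destination = destination
            self.image = image
            self.cached_size = -1

        def getstate(self):
            # Persist only the serializable subset of the slots.
            return {k: getattr(self, k) for k in self._serialize}

        @classmethod
        def fromstate(cls, state):
            # Restore persisted fields; the rest keep their reset defaults.
            obj = cls()
            for k, v in state.items():
                setattr(obj, k, v)
            return obj

    p = Plan("pkg@1.0", "pkg@2.0", image=object())
    restored = Plan.fromstate(p.getstate())
    print(restored.origin, restored.destination, restored.image)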
- __state__serialize = set(__slots__) - __state__noserialize - - # describe our state and the types of all objects - __state__desc = { - "_autofix_pkgs": [ pkg.fmri.PkgFmri ], - "_license_status": { - six.string_types[0]: { - "src": pkg.actions.generic.NSG, - "dest": pkg.actions.generic.NSG, - }, - }, - "actions": pkg.manifest.ManifestDifference, - "destination_fmri": pkg.fmri.PkgFmri, - "origin_fmri": pkg.fmri.PkgFmri, - } + for fpath, spath in self.image.imageplan.pd._salvaged[:]: + if fpath.startswith(local_path): + for other_salvage in self.__salvage_actions: + if fpath.startswith(other_salvage) and len( + other_salvage + ) > len(local_path): + continue - __state__commonize = frozenset([ - pkg.fmri.PkgFmri, - ]) - - def __init__(self, image=None): - self.destination_fmri = None - self.__destination_mfst = manifest.NullFactoredManifest - - self.origin_fmri = None - self.__origin_mfst = manifest.NullFactoredManifest - - self.actions = manifest.ManifestDifference([], [], []) - self.image = image - self.pkg_summary = None - - self._executed = False - self._license_status = {} - self.__repair_actions = {} - self.__salvage_actions = {} - self.__xferfiles = -1 - self.__xfersize = -1 - self._autofix_pkgs = [] - self._hash = None - - @staticmethod - def getstate(obj, je_state=None): - """Returns the serialized state of this object in a format - that that can be easily stored using JSON, pickle, etc.""" - - # validate unserialized state - # (see comments above __state__noserialize) - assert obj.__origin_mfst == manifest.NullFactoredManifest - assert obj.__destination_mfst == manifest.NullFactoredManifest - assert obj.__repair_actions == {} - - # we use __slots__, so create a state dictionary - state = {} - for k in obj.__state__serialize: - state[k] = getattr(obj, k) - - return pkg.misc.json_encode(PkgPlan.__name__, state, - PkgPlan.__state__desc, - commonize=PkgPlan.__state__commonize, je_state=je_state) - - @staticmethod - def setstate(obj, state, jd_state=None): - """Update the state of this object using previously serialized - state obtained via getstate().""" - - # get the name of the object we're dealing with - name = type(obj).__name__ - - # decode serialized state into python objects - state = pkg.misc.json_decode(name, state, - PkgPlan.__state__desc, - commonize=PkgPlan.__state__commonize, - jd_state=jd_state) - - # we use __slots__, so directly update attributes - for k in state: - setattr(obj, k, state[k]) - - # update unserialized state - # (see comments above __state__noserialize) - obj.__origin_mfst = manifest.NullFactoredManifest - obj.__destination_mfst = manifest.NullFactoredManifest - obj.__repair_actions = {} - obj.__xferfiles = -1 - obj.__xfersize = -1 - obj.image = None - - @staticmethod - def fromstate(state, jd_state=None): - """Allocate a new object using previously serialized state - obtained via getstate().""" - rv = PkgPlan() - PkgPlan.setstate(rv, state, jd_state) - return rv - - def __str__(self): - s = "{0} -> {1}\n".format(self.origin_fmri, - self.destination_fmri) - for src, dest in itertools.chain(*self.actions): - s += " {0} -> {1}\n".format(src, dest) - return s - - def __add_license(self, src, dest): - """Adds a license status entry for the given src and dest - license actions. - - 'src' should be None or the source action for a license. 
- - 'dest' must be the destination action for a license.""" - - self._license_status[dest.attrs["license"]] = { - "src": src, - "dest": dest, - "accepted": False, - "displayed": False, - } - - def propose(self, of, om, df, dm): - """Propose origin and dest fmri, manifest""" - self.origin_fmri = of - self.__origin_mfst = om - self.destination_fmri = df - self.__destination_mfst = dm - - def __get_orig_act(self, dest): - """Generate the on-disk state (attributes) of the action - that fail verification.""" - - if not dest.has_payload or "path" not in dest.attrs: - return - - path = os.path.join(self.image.root, dest.attrs["path"]) - try: - pstat = os.lstat(path) - except Exception: - # If file to repair isn't on-disk, treat as install - return + self.image.imageplan.pd._salvaged.remove((fpath, spath)) + self.image.recover(spath, full_destination, local_path, fpath) + + @property + def destination_manifest(self): + return self.__destination_mfst + + def clear_dest_manifest(self): + self.__destination_mfst = manifest.NullFactoredManifest + + @property + def origin_manifest(self): + return self.__origin_mfst + + def clear_origin_manifest(self): + self.__origin_mfst = manifest.NullFactoredManifest - act = pkg.actions.fromstr(str(dest)) - act.attrs["mode"] = oct(stat.S_IMODE(pstat.st_mode)) - try: - owner = pwd.getpwuid(pstat.st_uid).pw_name - group = grp.getgrgid(pstat.st_gid).gr_name - except KeyError: - # If associated user / group can't be determined, treat - # as install. This is not optimal for repairs, but - # ensures proper ownership of file is set. - return - act.attrs["owner"] = owner - act.attrs["group"] = group - - # No need to generate hash of on-disk content as verify - # short-circuits hash comparison by setting replace_required - # flag on action. The same is true for preserved files which - # will automatically handle content replacement if needed based - # on the result of _check_preserve. - return act - - def propose_repair(self, fmri, mfst, install, remove, autofix=False): - self.propose(fmri, mfst, fmri, mfst) - # self.origin_fmri = None - # I'd like a cleaner solution than this; we need to actually - # construct a list of actions as things currently are rather - # than just re-applying the current set of actions. - # - # Create a list of (src, dst) pairs for the actions to send to - # execute_repair. - - if autofix: - # If an uninstall causes a fixup to happen, we can't - # generate an on-disk state action because the result - # of needsdata is different between propose and execute. - # Therefore, we explicitly assign None to src for actions - # to be installed. - self.__repair_actions = { - # src is none for repairs - "install": [(None, x) for x in install], - # dest is none for removals. 
- "remove": [(x, None) for x in remove], - } - self._autofix_pkgs.append(fmri) - else: - self.__repair_actions = { - # src can be None or an action representing on-disk state - "install": [(self.__get_orig_act(x), x) for x in install], - "remove": [(x, None) for x in remove], - } - - def get_actions(self): - raise NotImplementedError() - - def get_nactions(self): - return len(self.actions.added) + len(self.actions.changed) + \ - len(self.actions.removed) - - def update_pkg_set(self, fmri_set): - """ updates a set of installed fmris to reflect - proposed new state""" - - if self.origin_fmri: - fmri_set.discard(self.origin_fmri) - - if self.destination_fmri: - fmri_set.add(self.destination_fmri) - - def evaluate(self, old_excludes=EmptyI, new_excludes=EmptyI, - can_exclude=False): - """Determine the actions required to transition the package.""" - - # If new actions are being installed, check the destination - # manifest for signatures. - if self.destination_fmri is not None: - try: - dest_pub = self.image.get_publisher( - prefix=self.destination_fmri.publisher) - except apx.UnknownPublisher: - # Since user removed publisher, assume this is - # the same as if they had set signature-policy - # ignore for the publisher. - sig_pol = None - else: - sig_pol = self.image.signature_policy.combine( - dest_pub.signature_policy) - - if self.destination_fmri in self._autofix_pkgs: - # Repaired packages use a manifest synthesized - # from the installed one; so retrieve the - # installed one for our signature checks. - sigman = self.image.get_manifest( - self.destination_fmri, - ignore_excludes=True) - else: - sigman = self.__destination_mfst - - sigs = list(sigman.gen_actions_by_type("signature", - excludes=new_excludes)) - if sig_pol and (sigs or sig_pol.name != "ignore"): - # Only perform signature verification logic if - # there are signatures or if signature-policy - # is not 'ignore'. - - try: - sig_pol.process_signatures(sigs, - sigman.gen_actions(), - dest_pub, self.image.trust_anchors, - self.image.cfg.get_policy( - "check-certificate-revocation")) - except apx.SigningException as e: - e.pfmri = self.destination_fmri - if isinstance(e, apx.BrokenChain): - e.ext_exs.extend( - self.image.bad_trust_anchors - ) - raise - if can_exclude: - if self.__destination_mfst is not None: - self.__destination_mfst.exclude_content( - new_excludes) - if self.__origin_mfst is not None and \ - self.__destination_mfst != self.__origin_mfst: - self.__origin_mfst.exclude_content(old_excludes) - old_excludes = EmptyI - new_excludes = EmptyI - - self.actions = self.__destination_mfst.difference( - self.__origin_mfst, old_excludes, new_excludes, - pkgplan=self) - - # figure out how many implicit directories disappear in this - # transition and add directory remove actions. These won't - # do anything unless no pkgs reference that directory in - # new state.... - - # Retrieving origin_dirs first and then checking it for any - # entries allows avoiding an unnecessary expanddirs for the - # destination manifest when it isn't needed. - origin_dirs = expanddirs(self.__origin_mfst.get_directories( - old_excludes)) - - # Manifest.get_directories() returns implicit directories, which - # means that this computation ends up re-adding all the explicit - # directories getting removed to the removed list. This is - # ugly, but safe. 
- if origin_dirs: - absent_dirs = origin_dirs - \ - expanddirs(self.__destination_mfst.get_directories( - new_excludes)) - - for a in absent_dirs: - self.actions.removed.append( - (directory.DirectoryAction(path=a, - implicit="True"), None)) - - # Stash information needed by legacy actions. - self.pkg_summary = \ - self.__destination_mfst.get("pkg.summary", - self.__destination_mfst.get("description", "none provided")) - - # Add any install repair actions to the update list - self.actions.changed.extend(self.__repair_actions.get("install", - EmptyI)) - self.actions.removed.extend(self.__repair_actions.get("remove", - EmptyI)) - - # No longer needed. - self.__repair_actions = {} - - for src, dest in itertools.chain(self.gen_update_actions(), - self.gen_install_actions()): - if dest.name == "license": - self.__add_license(src, dest) - if not src: - # Initial installs require acceptance. - continue - src_ma = src.attrs.get("must-accept", False) - dest_ma = dest.attrs.get("must-accept", False) - if (dest_ma and src_ma) and \ - src.hash == dest.hash: - # If src action required acceptance, - # then license was already accepted - # before, and if the hashes are the - # same for the license payload, then - # it doesn't need to be accepted again. - self.set_license_status( - dest.attrs["license"], - accepted=True) - - # Keep a cache of dir actions with salvage-from attrs. - # Since directories are installed in top-down order, - # we need this list to make sure we salvage contents - # as accurately as possible. For example, where: - # - # /var/user gets salvaged to /var/.migrate/user - # and - # /var/user/myuser/.ssh to /var/.migrate/user/myuser/.ssh - # - # We must ensure that we don't try to salvage - # var/user/myuser/.ssh when installing - # /var/.migrate/user, - # but instead wait till /var/.migrate/user/myuser/.ssh - # is being installed, otherwise that content will - # the salvaged to the wrong place. - if (dest.name == "dir" and - "salvage-from" in dest.attrs): - for p in dest.attrlist("salvage-from"): - self.__salvage_actions[p] = dest - - def get_licenses(self): - """A generator function that yields tuples of the form (license, - entry). Where 'entry' is a dict containing the license status - information.""" - - for lic, entry in six.iteritems(self._license_status): - yield lic, entry - - def set_license_status(self, plicense, accepted=None, displayed=None): - """Sets the license status for the given license entry. - - 'plicense' should be the value of the license attribute for the - destination license action. 
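The salvage-from comments above boil down to a longest-prefix match: salvaged content should be recovered by the most specific directory action that claims it, not by an ancestor. A standalone sketch of that selection, reusing the var/user paths from the comment:

def best_salvager(fpath, salvage_dirs):
    """Pick the most specific salvage-from prefix covering 'fpath',
    or None if nothing claims it yet."""
    matches = [d for d in salvage_dirs if fpath.startswith(d)]
    return max(matches, key=len) if matches else None

salvage_dirs = ["var/user", "var/user/myuser/.ssh"]

# .ssh content waits for the more specific action ...
print(best_salvager("var/user/myuser/.ssh/authorized_keys", salvage_dirs))
# -> 'var/user/myuser/.ssh'

# ... while other /var/user content is handled by the broader one.
print(best_salvager("var/user/other/.profile", salvage_dirs))
# -> 'var/user'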
- - 'accepted' is an optional parameter that can be one of three - values: - None leaves accepted status unchanged - False sets accepted status to False - True sets accepted status to True - - 'displayed' is an optional parameter that can be one of three - values: - None leaves displayed status unchanged - False sets displayed status to False - True sets displayed status to True""" - - entry = self._license_status[plicense] - if accepted is not None: - entry["accepted"] = accepted - if displayed is not None: - entry["displayed"] = displayed - - def get_xferstats(self): - if self.__xfersize != -1: - return (self.__xferfiles, self.__xfersize) - - self.__xfersize = 0 - self.__xferfiles = 0 - for src, dest in itertools.chain(*self.actions): - if dest and dest.needsdata(src, self): - self.__xfersize += get_pkg_otw_size(dest) - self.__xferfiles += 1 - if dest.name == "signature": - self.__xfersize += \ - dest.get_action_chain_csize() - self.__xferfiles += \ - len(dest.attrs.get("chain", - "").split()) - - return (self.__xferfiles, self.__xfersize) - - def get_bytes_added(self): - """Return tuple of compressed bytes possibly downloaded - and number of bytes laid down; ignore removals - because they're usually pinned by snapshots""" - def sum_dest_size(a, b): - if b[1]: - return (a[0] + int(b[1].attrs.get("pkg.csize" ,0)), - a[1] + int(b[1].attrs.get("pkg.size", 0))) - return (a[0], a[1]) - - return reduce(sum_dest_size, itertools.chain(*self.actions), - (0, 0)) - - def get_xferfmri(self): - if self.destination_fmri: - return self.destination_fmri - if self.origin_fmri: - return self.origin_fmri - return None - - def preexecute(self): - """Perform actions required prior to installation or removal of - a package. - - This method executes each action's preremove() or preinstall() - methods, as well as any package-wide steps that need to be taken - at such a time. - """ - - # Determine if license acceptance requirements have been met as - # early as possible. 
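get_xferstats() and get_bytes_added() above fold the (src, dest) action pairs into running totals driven by the pkg.csize and pkg.size attributes, ignoring removals. The same fold in isolation, with plain dicts standing in for action attrs and invented sizes:

import itertools
from functools import reduce

def sum_dest_size(acc, pair):
    """Accumulate (compressed bytes, installed bytes) for the dest action."""
    dest = pair[1]
    if dest:
        return (acc[0] + int(dest.get("pkg.csize", 0)),
                acc[1] + int(dest.get("pkg.size", 0)))
    return acc

added = [(None, {"pkg.csize": 1024, "pkg.size": 4096})]
changed = [({"pkg.size": 10}, {"pkg.csize": 512, "pkg.size": 2048})]
removed = [({"pkg.size": 300}, None)]      # ignored: no destination

csize, size = reduce(sum_dest_size, itertools.chain(added, changed, removed), (0, 0))
print(csize, size)    # 1536 6144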
- errors = [] - for lic, entry in self.get_licenses(): - dest = entry["dest"] - if (dest.must_accept and not entry["accepted"]) or \ - (dest.must_display and not entry["displayed"]): - errors.append(apx.LicenseAcceptanceError( - self.destination_fmri, **entry)) - - if errors: - raise apx.PkgLicenseErrors(errors) - - for src, dest in itertools.chain(*self.actions): - if dest: - dest.preinstall(self, src) - else: - src.preremove(self) - - def download(self, progtrack, check_cancel): - """Download data for any actions that need it.""" - progtrack.download_start_pkg(self.get_xferfmri()) - mfile = self.image.transport.multi_file(self.destination_fmri, - progtrack, check_cancel) - - if mfile is None: - progtrack.download_end_pkg(self.get_xferfmri()) - return - - for src, dest in itertools.chain(*self.actions): - if dest and dest.needsdata(src, self): - mfile.add_action(dest) - - mfile.wait_files() - progtrack.download_end_pkg(self.get_xferfmri()) - - def cacheload(self): - """Load previously downloaded data for actions that need it.""" - - fmri = self.destination_fmri - for src, dest in itertools.chain(*self.actions): - if not dest or not dest.needsdata(src, self): - continue - dest.data = self.image.transport.action_cached(fmri, - dest) - - def gen_install_actions(self): - for src, dest in self.actions.added: - yield src, dest - - def gen_removal_actions(self): - for src, dest in self.actions.removed: - yield src, dest - - def gen_update_actions(self): - for src, dest in self.actions.changed: - yield src, dest - - def execute_install(self, src, dest): - """ perform action for installation of package""" - if DebugValues["actions"]: - print("execute_install: {} -> {}".format(src, dest)) - self._executed = True - try: - dest.install(self, src) - except (pkg.actions.ActionError, EnvironmentError): - # Don't log these as they're expected, and should be - # handled by the caller. - raise - except Exception as e: - logger.error("Action install failed for '{0}' ({1}):\n " - "{2}: {3}".format(dest.attrs.get(dest.key_attr, - id(dest)), self.destination_fmri.get_pkg_stem(), - e.__class__.__name__, e)) - raise - - def execute_update(self, src, dest): - """ handle action updates""" - if DebugValues["actions"]: - print("execute_update: {} -> {}".format(src, dest)) - self._executed = True - try: - dest.install(self, src) - except (pkg.actions.ActionError, EnvironmentError): - # Don't log these as they're expected, and should be - # handled by the caller. - raise - except Exception as e: - logger.error("Action upgrade failed for '{0}' ({1}):\n " - "{2}: {3}".format(dest.attrs.get(dest.key_attr, - id(dest)), self.destination_fmri.get_pkg_stem(), - e.__class__.__name__, e)) - raise - - def execute_removal(self, src, dest): - """ handle action removals""" - if DebugValues["actions"]: - print("execute_removal: {}".format(src)) - self._executed = True - try: - src.remove(self) - except (pkg.actions.ActionError, EnvironmentError): - # Don't log these as they're expected, and should be - # handled by the caller. - raise - except Exception as e: - logger.error("Action removal failed for '{0}' ({1}):\n " - "{2}: {3}".format(src.attrs.get(src.key_attr, - id(src)), self.origin_fmri.get_pkg_stem(), - e.__class__.__name__, e)) - raise - - def execute_retry(self, src, dest): - """handle a retry operation""" - dest.retry(self, dest) - - def postexecute(self): - """Perform actions required after install or remove of a pkg. 
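Throughout these methods the shape of the (src, dest) pair encodes the operation: (None, a) is an install, (a, None) a removal, and (a, b) an update. A tiny standalone sketch of dispatching on that convention (the action strings are invented):

import itertools

def describe(pair):
    src, dest = pair
    if src is None:
        return "install {0}".format(dest)
    if dest is None:
        return "remove {0}".format(src)
    return "update {0} -> {1}".format(src, dest)

added = [(None, "file path=etc/foo.conf")]
changed = [("driver name=e1000g", "driver name=e1000g alias=pci8086,1234")]
removed = [("file path=etc/old.conf", None)]

for pair in itertools.chain(added, changed, removed):
    print(describe(pair))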
- - This method executes each action's postremove() or postinstall() - methods, as well as any package-wide steps that need to be taken - at such a time. - """ - # record that package states are consistent - for src, dest in itertools.chain(*self.actions): - if dest: - dest.postinstall(self, src) - else: - src.postremove(self) - - def salvage(self, path): - """Used to save unexpected files or directories found during - plan execution. Salvaged items are tracked in the imageplan. - """ - - assert self._executed - spath = self.image.salvage(path) - # get just the file path that was salvaged - fpath = path.replace( - os.path.normpath(self.image.get_root()), "", 1) - if fpath.startswith(os.path.sep): - fpath = fpath[1:] - self.image.imageplan.pd._salvaged.append((fpath, spath)) - - def salvage_from(self, local_path, full_destination): - """move unpackaged contents to specified destination""" - # remove leading / if present - if local_path.startswith(os.path.sep): - local_path = local_path[1:] - - # The salvaged locations known to us are a list of tuples of - # the form (old dir, lost+found salvage dir) and stored in - # self.image.imageplan.pd._salvaged[:] - - # - # Check if this salvage-from is also the best match for other - # possibly previously packaged subdirs of this directory. - # E.g. if we stop delivering /var/user/evsuser/.ssh, then the - # action that specifies 'salvage-from=var/user' ought to deal - # with its files. - # - # On the other hand, if we package another directory, with - # 'salvage-from=/var/user/evsuser', then that should be used - # to salvage the .ssh content, not the action that salvages - # from /var/user. - # - for fpath, spath in self.image.imageplan.pd._salvaged[:]: - if fpath.startswith(local_path): - for other_salvage in self.__salvage_actions: - if fpath.startswith(other_salvage) and \ - len(other_salvage) > len(local_path): - continue - - self.image.imageplan.pd._salvaged.remove( - (fpath, spath)) - self.image.recover( - spath, full_destination, - local_path, fpath) - - @property - def destination_manifest(self): - return self.__destination_mfst - - def clear_dest_manifest(self): - self.__destination_mfst = manifest.NullFactoredManifest - - @property - def origin_manifest(self): - return self.__origin_mfst - - def clear_origin_manifest(self): - self.__origin_mfst = manifest.NullFactoredManifest # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/pkgremote.py b/src/modules/client/pkgremote.py index ca91ab1a3..6439c75f8 100644 --- a/src/modules/client/pkgremote.py +++ b/src/modules/client/pkgremote.py @@ -52,495 +52,511 @@ # debugging aids # DebugValues is a singleton; pylint: disable=E1120 pkgremote_debug = ( - DebugValues.get_value("pkgremote_debug") is not None or - os.environ.get("PKG_PKGREMOTE_DEBUG", None) is not None) + DebugValues.get_value("pkgremote_debug") is not None + or os.environ.get("PKG_PKGREMOTE_DEBUG", None) is not None +) -class PkgRemote(object): - """This class is used to perform packaging operation on an image. It - utilizes the "remote" subcommand within the pkg.1 client to manipulate - images. Communication between this class and the "pkg remote" process - is done via RPC. This class essentially implements an RPC client and - the "pkg remote" process is an RPC server.""" - - # variables to keep track of our RPC client call state. 
- __IDLE = "call-idle" - __SETUP = "call-setup" - __STARTED = "call-started" - - def __init__(self): - # initialize RPC server process state - self.__rpc_server_proc = None - self.__rpc_server_fstdout = None - self.__rpc_server_fstderr = None - self.__rpc_server_prog_pipe_fobj = None - - # initialize RPC client process state - self.__rpc_client = None - self.__rpc_client_prog_pipe_fobj = None - - # initialize RPC client call state - self.__state = self.__IDLE - self.__pkg_op = None - self.__kwargs = None - self.__async_rpc_caller = None - self.__async_rpc_waiter = None - self.__result = None - - # sanity check the idle state by re-initializing it - self.__set_state_idle() - - def __debug_msg(self, msg, t1=False, t2=False): - """Log debugging messages.""" - - if not pkgremote_debug: - return - - if t1: - prefix = "PkgRemote({0}) client thread 1: ".format( - id(self)) - elif t2: - prefix = "PkgRemote({0}) client thread 2: ".format( - id(self)) - else: - prefix = "PkgRemote({0}) client: ".format(id(self)) - - # it's not an enforcement but a coding style - # logging-format-interpolation; pylint: disable=W1202 - global_settings.logger.info("{0}{1}".format(prefix, msg)) - - def __rpc_server_fork(self, img_path, - server_cmd_pipe, server_prog_pipe_fobj): - """Fork off a "pkg remote" server process. - - 'img_path' is the path to the image to manipulate. - - 'server_cmd_pipe' is the server side of the command pipe which - the server will use to receive RPC requests. - - 'server_prog_pipe_fobj' is the server side of the progress - pipe which the server will write to to indicate progress.""" - - pkg_cmd = pkg.misc.api_pkgcmd() + [ - "-R", img_path, - "--runid={0}".format(global_settings.client_runid), - "remote", - "--ctlfd={0}".format(server_cmd_pipe), - "--progfd={0}".format(server_prog_pipe_fobj.fileno()), - ] - - self.__debug_msg("RPC server cmd: {0}".format( - " ".join(pkg_cmd))) - - # create temporary files to log standard output and error from - # the RPC server. - fstdout = tempfile.TemporaryFile() - fstderr = tempfile.TemporaryFile() - - try: - # Under Python 3.4, os.pipe() returns non-inheritable - # file descriptors. On UNIX, subprocess makes file - # descriptors of the pass_fds parameter inheritable. - # Since our pkgsubprocess use posix_pspawn* and doesn't - # have an interface for pass_fds, we reuse the Python - # module subprocess here. - # unexpected-keyword-arg 'pass_fds'; - # pylint: disable=E1123 - # Redefinition of p type - if six.PY2: - p = pkg.pkgsubprocess.Popen(pkg_cmd, - stdout=fstdout, stderr=fstderr) - else: - p = subprocess.Popen(pkg_cmd, - stdout=fstdout, stderr=fstderr, - pass_fds=(server_cmd_pipe, - server_prog_pipe_fobj.fileno())) - - except OSError as e: - # Access to protected member; pylint: disable=W0212 - raise apx._convert_error(e) - - # initalization successful, update RPC server state - self.__rpc_server_proc = p - self.__rpc_server_fstdout = fstdout - self.__rpc_server_fstderr = fstderr - self.__rpc_server_prog_pipe_fobj = server_prog_pipe_fobj - - def __rpc_server_setup(self, img_path): - """Start a new RPC Server process. - - 'img_path' is the path to the image to manipulate.""" - - # create a pipe for communication between the client and server - client_cmd_pipe, server_cmd_pipe = os.pipe() - # create a pipe that the server server can use to indicate - # progress to the client. wrap the pipe fds in python file - # objects so that they gets closed automatically when those - # objects are dereferenced. 
- client_prog_pipe, server_prog_pipe = os.pipe() - client_prog_pipe_fobj = os.fdopen(client_prog_pipe, "r") - server_prog_pipe_fobj = os.fdopen(server_prog_pipe, "w") - - # initialize the client side of the RPC server - rpc_client = pkg.pipeutils.PipedServerProxy(client_cmd_pipe) - - # fork off the server - self.__rpc_server_fork(img_path, - server_cmd_pipe, server_prog_pipe_fobj) - - # close our reference to server end of the pipe. (the server - # should have already closed its reference to the client end - # of the pipe.) - os.close(server_cmd_pipe) - - # initalization successful, update RPC client state - self.__rpc_client = rpc_client - self.__rpc_client_prog_pipe_fobj = client_prog_pipe_fobj - - def __rpc_server_fini(self): - """Close connection to a RPC Server process.""" - - # destroying the RPC client object closes our connection to - # the server, which should cause the server to exit. - self.__rpc_client = None - - # if we have a server, kill it and wait for it to exit - if self.__rpc_server_proc: - self.__rpc_server_proc.terminate() - self.__rpc_server_proc.wait() - - # clear server state (which closes the rpc pipe file - # descriptors) - self.__rpc_server_proc = None - self.__rpc_server_fstdout = None - self.__rpc_server_fstderr = None - - # wait for any client RPC threads to exit - if self.__async_rpc_caller: - self.__async_rpc_caller.join() - if self.__async_rpc_waiter: - self.__async_rpc_waiter.join() - - # close the progress pipe - self.__rpc_server_prog_pipe_fobj = None - self.__rpc_client_prog_pipe_fobj = None - - def fileno(self): - """Return the progress pipe for the server process. We use - this to monitor progress in the RPC server""" - - return self.__rpc_client_prog_pipe_fobj.fileno() - - def __rpc_client_prog_pipe_drain(self): - """Drain the client progress pipe.""" - - progfd = self.__rpc_client_prog_pipe_fobj.fileno() - p = select.poll() - p.register(progfd, select.POLLIN) - while p.poll(0): - os.read(progfd, 10240) - - def __state_verify(self, state=None): - """Sanity check our internal call state. - - 'state' is an optional parameter that indicates which state - we should be in now. 
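__rpc_client_prog_pipe_drain() above empties the progress pipe without ever blocking: select.poll() with a zero timeout only reports data that is already buffered, so the loop reads until nothing is left. The same pattern in isolation:

import os
import select

def drain(fd, chunk=10240):
    """Read and discard whatever is currently buffered on 'fd' without blocking."""
    p = select.poll()
    p.register(fd, select.POLLIN)
    while p.poll(0):            # timeout of 0 -> never blocks
        os.read(fd, chunk)

r, w = os.pipe()
os.write(w, b"progress....")
drain(r)                        # consumes the pending bytes
print(select.select([r], [], [], 0)[0])   # [] -> nothing left to read
os.close(r)
os.close(w)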
(without this parameter we just verify - that the current state, whatever it is, is self - consistent.)""" - - if state is not None: - assert self.__state == state, \ - "{0} == {1}".format(self.__state, state) - else: - state = self.__state - - if state == self.__IDLE: - assert self.__pkg_op is None, \ - "{0} is None".format(self.__pkg_op) - assert self.__kwargs is None, \ - "{0} is None".format(self.__kwargs) - assert self.__async_rpc_caller is None, \ - "{0} is None".format(self.__async_rpc_caller) - assert self.__async_rpc_waiter is None, \ - "{0} is None".format(self.__async_rpc_waiter) - assert self.__result is None, \ - "{0} is None".format(self.__result) - - elif state == self.__SETUP: - assert self.__pkg_op is not None, \ - "{0} is not None".format(self.__pkg_op) - assert self.__kwargs is not None, \ - "{0} is not None".format(self.__kwargs) - assert self.__async_rpc_caller is None, \ - "{0} is None".format(self.__async_rpc_caller) - assert self.__async_rpc_waiter is None, \ - "{0} is None".format(self.__async_rpc_waiter) - assert self.__result is None, \ - "{0} is None".format(self.__result) - - elif state == self.__STARTED: - assert self.__pkg_op is not None, \ - "{0} is not None".format(self.__pkg_op) - assert self.__kwargs is not None, \ - "{0} is not None".format(self.__kwargs) - assert self.__async_rpc_caller is not None, \ - "{0} is not None".format(self.__async_rpc_caller) - assert self.__async_rpc_waiter is not None, \ - "{0} is not None".format(self.__async_rpc_waiter) - assert self.__result is None, \ - "{0} is None".format(self.__result) - - def __set_state_idle(self): - """Enter the __IDLE state. This clears all RPC call - state.""" - - # verify the current state - self.__state_verify() - - # setup the new state - self.__state = self.__IDLE - self.__pkg_op = None - self.__kwargs = None - self.__async_rpc_caller = None - self.__async_rpc_waiter = None - self.__result = None - self.__debug_msg("set call state: {0}".format(self.__state)) - - # verify the new state - self.__state_verify() - - def __set_state_setup(self, pkg_op, kwargs): - """Enter the __SETUP state. This indicates that we're - all ready to make a call into the RPC server. - - 'pkg_op' is the packaging operation we're going to do via RPC - - 'kwargs' is the argument dict for the RPC operation. - - 't' is the RPC client thread that will call into the RPC - server.""" - - # verify the current state - self.__state_verify(state=self.__IDLE) - - # setup the new state - self.__state = self.__SETUP - self.__pkg_op = pkg_op - self.__kwargs = kwargs - self.__debug_msg("set call state: {0}, pkg op: {1}".format( - self.__state, pkg_op)) - - # verify the new state - self.__state_verify() - - def __set_state_started(self, async_rpc_caller, async_rpc_waiter): - """Enter the __SETUP state. This indicates that we've - started a call to the RPC server and we're now waiting for - that call to return.""" - - # verify the current state - self.__state_verify(state=self.__SETUP) - - # setup the new state - self.__state = self.__STARTED - self.__async_rpc_caller = async_rpc_caller - self.__async_rpc_waiter = async_rpc_waiter - self.__debug_msg("set call state: {0}".format(self.__state)) - - # verify the new state - self.__state_verify() - - def __rpc_async_caller(self, fstdout, fstderr, rpc_client, - pkg_op, **kwargs): - """RPC thread callback. This routine is invoked in its own - thread (so the caller doesn't have to block) and it makes a - blocking call to the RPC server. 
- - 'kwargs' is the argument dict for the RPC operation.""" - - self.__debug_msg("starting pkg op: {0}; args: {1}".format( - pkg_op, kwargs), t1=True) - - # make the RPC call - rv = e = None - rpc_method = getattr(rpc_client, pkg_op) - try: - # Catch "Exception"; pylint: disable=W0703 - rv = rpc_method(**kwargs) - except Exception as ex: - # due to python 3 scoping rules - e = ex - self.__debug_msg("caught exception\n{0}".format( - traceback.format_exc()), t1=True) - else: - self.__debug_msg("returned: {0}".format(rv), t1=True) - # ensure that the decoding is performed using the user's locale - # because messages from the called program will be using it. - encoding = locale.getpreferredencoding(do_setlocale=False) - - # get output generated by the RPC server. the server - # truncates its output file after each operation, so we always - # read output from the beginning of the file. - fstdout.seek(0) - stdout = (b"".join(fstdout.readlines())).decode(encoding) - fstderr.seek(0) - stderr = (b"".join(fstderr.readlines())).decode(encoding) - - self.__debug_msg("exiting", t1=True) - return (rv, e, stdout, stderr) - - def __rpc_async_waiter(self, async_call, prog_pipe): - """RPC waiter thread. This thread waits on the RPC thread - and signals its completion by writing a byte to the progress - pipe. - - The RPC call thread can't do this for itself because that - results in a race (the RPC thread could block after writing - this byte but before actually exiting, and then the client - would read the byte, see that the RPC thread is not done, and - block while trying to read another byte which would never show - up). This thread solves this problem without using any shared - state.""" - - self.__debug_msg("starting", t2=True) - async_call.join() - try: - os.write(prog_pipe.fileno(), b".") - except (IOError, OSError): - pass - self.__debug_msg("exiting", t2=True) - - def __rpc_client_setup(self, pkg_op, **kwargs): - """Prepare to perform a RPC operation. - - 'pkg_op' is the packaging operation we're going to do via RPC - - 'kwargs' is the argument dict for the RPC operation.""" - - self.__set_state_setup(pkg_op, kwargs) - - # drain the progress pipe - self.__rpc_client_prog_pipe_drain() - - def setup(self, img_path, pkg_op, **kwargs): - """Public interface to setup a remote packaging operation. - - 'img_path' is the path to the image to manipulate. - - 'pkg_op' is the packaging operation we're going to do via RPC - - 'kwargs' is the argument dict for the RPC operation.""" - - self.__debug_msg("setup()") - self.__rpc_server_setup(img_path) - self.__rpc_client_setup(pkg_op, **kwargs) - - def start(self): - """Public interface to start a remote packaging operation.""" - self.__debug_msg("start()") - self.__state_verify(self.__SETUP) +class PkgRemote(object): + """This class is used to perform packaging operation on an image. It + utilizes the "remote" subcommand within the pkg.1 client to manipulate + images. Communication between this class and the "pkg remote" process + is done via RPC. This class essentially implements an RPC client and + the "pkg remote" process is an RPC server.""" + + # variables to keep track of our RPC client call state. 
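The waiter-thread docstring above explains why completion is signalled by a separate thread: it joins the worker first and only then writes one byte to the pipe, so a poll()-based caller never wakes up before the result is really available. A minimal standalone sketch with plain threading, where a short sleep stands in for the blocking RPC call:

import os
import select
import threading
import time

r, w = os.pipe()

def worker():
    time.sleep(0.2)             # stand-in for the blocking RPC call

def waiter(t, wfd):
    t.join()                    # wait for the worker to fully finish...
    os.write(wfd, b".")         # ...then signal completion with one byte

t1 = threading.Thread(target=worker)
t2 = threading.Thread(target=waiter, args=(t1, w))
t1.start()
t2.start()

select.select([r], [], [])      # blocks until the waiter writes its byte
print("worker is done:", not t1.is_alive())   # True
t2.join()
os.close(r)
os.close(w)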
+ __IDLE = "call-idle" + __SETUP = "call-setup" + __STARTED = "call-started" + + def __init__(self): + # initialize RPC server process state + self.__rpc_server_proc = None + self.__rpc_server_fstdout = None + self.__rpc_server_fstderr = None + self.__rpc_server_prog_pipe_fobj = None + + # initialize RPC client process state + self.__rpc_client = None + self.__rpc_client_prog_pipe_fobj = None + + # initialize RPC client call state + self.__state = self.__IDLE + self.__pkg_op = None + self.__kwargs = None + self.__async_rpc_caller = None + self.__async_rpc_waiter = None + self.__result = None + + # sanity check the idle state by re-initializing it + self.__set_state_idle() + + def __debug_msg(self, msg, t1=False, t2=False): + """Log debugging messages.""" + + if not pkgremote_debug: + return + + if t1: + prefix = "PkgRemote({0}) client thread 1: ".format(id(self)) + elif t2: + prefix = "PkgRemote({0}) client thread 2: ".format(id(self)) + else: + prefix = "PkgRemote({0}) client: ".format(id(self)) + + # it's not an enforcement but a coding style + # logging-format-interpolation; pylint: disable=W1202 + global_settings.logger.info("{0}{1}".format(prefix, msg)) + + def __rpc_server_fork( + self, img_path, server_cmd_pipe, server_prog_pipe_fobj + ): + """Fork off a "pkg remote" server process. + + 'img_path' is the path to the image to manipulate. + + 'server_cmd_pipe' is the server side of the command pipe which + the server will use to receive RPC requests. + + 'server_prog_pipe_fobj' is the server side of the progress + pipe which the server will write to to indicate progress.""" + + pkg_cmd = pkg.misc.api_pkgcmd() + [ + "-R", + img_path, + "--runid={0}".format(global_settings.client_runid), + "remote", + "--ctlfd={0}".format(server_cmd_pipe), + "--progfd={0}".format(server_prog_pipe_fobj.fileno()), + ] + + self.__debug_msg("RPC server cmd: {0}".format(" ".join(pkg_cmd))) + + # create temporary files to log standard output and error from + # the RPC server. + fstdout = tempfile.TemporaryFile() + fstderr = tempfile.TemporaryFile() + + try: + # Under Python 3.4, os.pipe() returns non-inheritable + # file descriptors. On UNIX, subprocess makes file + # descriptors of the pass_fds parameter inheritable. + # Since our pkgsubprocess use posix_pspawn* and doesn't + # have an interface for pass_fds, we reuse the Python + # module subprocess here. + # unexpected-keyword-arg 'pass_fds'; + # pylint: disable=E1123 + # Redefinition of p type + if six.PY2: + p = pkg.pkgsubprocess.Popen( + pkg_cmd, stdout=fstdout, stderr=fstderr + ) + else: + p = subprocess.Popen( + pkg_cmd, + stdout=fstdout, + stderr=fstderr, + pass_fds=(server_cmd_pipe, server_prog_pipe_fobj.fileno()), + ) + + except OSError as e: + # Access to protected member; pylint: disable=W0212 + raise apx._convert_error(e) + + # initalization successful, update RPC server state + self.__rpc_server_proc = p + self.__rpc_server_fstdout = fstdout + self.__rpc_server_fstderr = fstderr + self.__rpc_server_prog_pipe_fobj = server_prog_pipe_fobj + + def __rpc_server_setup(self, img_path): + """Start a new RPC Server process. + + 'img_path' is the path to the image to manipulate.""" + + # create a pipe for communication between the client and server + client_cmd_pipe, server_cmd_pipe = os.pipe() + # create a pipe that the server server can use to indicate + # progress to the client. wrap the pipe fds in python file + # objects so that they gets closed automatically when those + # objects are dereferenced. 
+ client_prog_pipe, server_prog_pipe = os.pipe() + client_prog_pipe_fobj = os.fdopen(client_prog_pipe, "r") + server_prog_pipe_fobj = os.fdopen(server_prog_pipe, "w") + + # initialize the client side of the RPC server + rpc_client = pkg.pipeutils.PipedServerProxy(client_cmd_pipe) + + # fork off the server + self.__rpc_server_fork(img_path, server_cmd_pipe, server_prog_pipe_fobj) + + # close our reference to server end of the pipe. (the server + # should have already closed its reference to the client end + # of the pipe.) + os.close(server_cmd_pipe) + + # initalization successful, update RPC client state + self.__rpc_client = rpc_client + self.__rpc_client_prog_pipe_fobj = client_prog_pipe_fobj + + def __rpc_server_fini(self): + """Close connection to a RPC Server process.""" + + # destroying the RPC client object closes our connection to + # the server, which should cause the server to exit. + self.__rpc_client = None + + # if we have a server, kill it and wait for it to exit + if self.__rpc_server_proc: + self.__rpc_server_proc.terminate() + self.__rpc_server_proc.wait() + + # clear server state (which closes the rpc pipe file + # descriptors) + self.__rpc_server_proc = None + self.__rpc_server_fstdout = None + self.__rpc_server_fstderr = None + + # wait for any client RPC threads to exit + if self.__async_rpc_caller: + self.__async_rpc_caller.join() + if self.__async_rpc_waiter: + self.__async_rpc_waiter.join() + + # close the progress pipe + self.__rpc_server_prog_pipe_fobj = None + self.__rpc_client_prog_pipe_fobj = None + + def fileno(self): + """Return the progress pipe for the server process. We use + this to monitor progress in the RPC server""" + + return self.__rpc_client_prog_pipe_fobj.fileno() + + def __rpc_client_prog_pipe_drain(self): + """Drain the client progress pipe.""" + + progfd = self.__rpc_client_prog_pipe_fobj.fileno() + p = select.poll() + p.register(progfd, select.POLLIN) + while p.poll(0): + os.read(progfd, 10240) + + def __state_verify(self, state=None): + """Sanity check our internal call state. + + 'state' is an optional parameter that indicates which state + we should be in now. 
(without this parameter we just verify + that the current state, whatever it is, is self + consistent.)""" + + if state is not None: + assert self.__state == state, "{0} == {1}".format( + self.__state, state + ) + else: + state = self.__state + + if state == self.__IDLE: + assert self.__pkg_op is None, "{0} is None".format(self.__pkg_op) + assert self.__kwargs is None, "{0} is None".format(self.__kwargs) + assert self.__async_rpc_caller is None, "{0} is None".format( + self.__async_rpc_caller + ) + assert self.__async_rpc_waiter is None, "{0} is None".format( + self.__async_rpc_waiter + ) + assert self.__result is None, "{0} is None".format(self.__result) + + elif state == self.__SETUP: + assert self.__pkg_op is not None, "{0} is not None".format( + self.__pkg_op + ) + assert self.__kwargs is not None, "{0} is not None".format( + self.__kwargs + ) + assert self.__async_rpc_caller is None, "{0} is None".format( + self.__async_rpc_caller + ) + assert self.__async_rpc_waiter is None, "{0} is None".format( + self.__async_rpc_waiter + ) + assert self.__result is None, "{0} is None".format(self.__result) + + elif state == self.__STARTED: + assert self.__pkg_op is not None, "{0} is not None".format( + self.__pkg_op + ) + assert self.__kwargs is not None, "{0} is not None".format( + self.__kwargs + ) + assert ( + self.__async_rpc_caller is not None + ), "{0} is not None".format(self.__async_rpc_caller) + assert ( + self.__async_rpc_waiter is not None + ), "{0} is not None".format(self.__async_rpc_waiter) + assert self.__result is None, "{0} is None".format(self.__result) + + def __set_state_idle(self): + """Enter the __IDLE state. This clears all RPC call + state.""" + + # verify the current state + self.__state_verify() + + # setup the new state + self.__state = self.__IDLE + self.__pkg_op = None + self.__kwargs = None + self.__async_rpc_caller = None + self.__async_rpc_waiter = None + self.__result = None + self.__debug_msg("set call state: {0}".format(self.__state)) + + # verify the new state + self.__state_verify() + + def __set_state_setup(self, pkg_op, kwargs): + """Enter the __SETUP state. This indicates that we're + all ready to make a call into the RPC server. + + 'pkg_op' is the packaging operation we're going to do via RPC + + 'kwargs' is the argument dict for the RPC operation. + + 't' is the RPC client thread that will call into the RPC + server.""" + + # verify the current state + self.__state_verify(state=self.__IDLE) + + # setup the new state + self.__state = self.__SETUP + self.__pkg_op = pkg_op + self.__kwargs = kwargs + self.__debug_msg( + "set call state: {0}, pkg op: {1}".format(self.__state, pkg_op) + ) + + # verify the new state + self.__state_verify() + + def __set_state_started(self, async_rpc_caller, async_rpc_waiter): + """Enter the __SETUP state. This indicates that we've + started a call to the RPC server and we're now waiting for + that call to return.""" + + # verify the current state + self.__state_verify(state=self.__SETUP) + + # setup the new state + self.__state = self.__STARTED + self.__async_rpc_caller = async_rpc_caller + self.__async_rpc_waiter = async_rpc_waiter + self.__debug_msg("set call state: {0}".format(self.__state)) + + # verify the new state + self.__state_verify() + + def __rpc_async_caller( + self, fstdout, fstderr, rpc_client, pkg_op, **kwargs + ): + """RPC thread callback. This routine is invoked in its own + thread (so the caller doesn't have to block) and it makes a + blocking call to the RPC server. 
+ + 'kwargs' is the argument dict for the RPC operation.""" + + self.__debug_msg( + "starting pkg op: {0}; args: {1}".format(pkg_op, kwargs), t1=True + ) + + # make the RPC call + rv = e = None + rpc_method = getattr(rpc_client, pkg_op) + try: + # Catch "Exception"; pylint: disable=W0703 + rv = rpc_method(**kwargs) + except Exception as ex: + # due to python 3 scoping rules + e = ex + self.__debug_msg( + "caught exception\n{0}".format(traceback.format_exc()), t1=True + ) + else: + self.__debug_msg("returned: {0}".format(rv), t1=True) + # ensure that the decoding is performed using the user's locale + # because messages from the called program will be using it. + encoding = locale.getpreferredencoding(do_setlocale=False) + + # get output generated by the RPC server. the server + # truncates its output file after each operation, so we always + # read output from the beginning of the file. + fstdout.seek(0) + stdout = (b"".join(fstdout.readlines())).decode(encoding) + fstderr.seek(0) + stderr = (b"".join(fstderr.readlines())).decode(encoding) + + self.__debug_msg("exiting", t1=True) + return (rv, e, stdout, stderr) + + def __rpc_async_waiter(self, async_call, prog_pipe): + """RPC waiter thread. This thread waits on the RPC thread + and signals its completion by writing a byte to the progress + pipe. + + The RPC call thread can't do this for itself because that + results in a race (the RPC thread could block after writing + this byte but before actually exiting, and then the client + would read the byte, see that the RPC thread is not done, and + block while trying to read another byte which would never show + up). This thread solves this problem without using any shared + state.""" + + self.__debug_msg("starting", t2=True) + async_call.join() + try: + os.write(prog_pipe.fileno(), b".") + except (IOError, OSError): + pass + self.__debug_msg("exiting", t2=True) + + def __rpc_client_setup(self, pkg_op, **kwargs): + """Prepare to perform a RPC operation. + + 'pkg_op' is the packaging operation we're going to do via RPC + + 'kwargs' is the argument dict for the RPC operation.""" + + self.__set_state_setup(pkg_op, kwargs) + + # drain the progress pipe + self.__rpc_client_prog_pipe_drain() + + def setup(self, img_path, pkg_op, **kwargs): + """Public interface to setup a remote packaging operation. + + 'img_path' is the path to the image to manipulate. 
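The caller thread above reads the server's accumulated output back out of the temporary files and decodes it with the user's preferred locale, since the child prints in that locale rather than necessarily UTF-8. A standalone sketch of that capture-and-decode step, using a trivial python -c child in place of the RPC server:

import locale
import subprocess
import sys
import tempfile

fstdout = tempfile.TemporaryFile()
fstderr = tempfile.TemporaryFile()

subprocess.run(
    [sys.executable, "-c",
     "print('ok'); import sys; print('warn', file=sys.stderr)"],
    stdout=fstdout, stderr=fstderr, check=True,
)

encoding = locale.getpreferredencoding(do_setlocale=False)

fstdout.seek(0)
stdout = b"".join(fstdout.readlines()).decode(encoding)
fstderr.seek(0)
stderr = b"".join(fstderr.readlines()).decode(encoding)

print(repr(stdout), repr(stderr))   # 'ok\n' 'warn\n'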
+ + 'pkg_op' is the packaging operation we're going to do via RPC + + 'kwargs' is the argument dict for the RPC operation.""" + + self.__debug_msg("setup()") + self.__rpc_server_setup(img_path) + self.__rpc_client_setup(pkg_op, **kwargs) + + def start(self): + """Public interface to start a remote packaging operation.""" + + self.__debug_msg("start()") + self.__state_verify(self.__SETUP) + + async_rpc_caller = pkg.misc.AsyncCall() + async_rpc_caller.start( + self.__rpc_async_caller, + self.__rpc_server_fstdout, + self.__rpc_server_fstderr, + self.__rpc_client, + self.__pkg_op, + **self.__kwargs, + ) - async_rpc_caller = pkg.misc.AsyncCall() - async_rpc_caller.start( - self.__rpc_async_caller, - self.__rpc_server_fstdout, - self.__rpc_server_fstderr, - self.__rpc_client, - self.__pkg_op, - **self.__kwargs) + async_rpc_waiter = pkg.misc.AsyncCall() + async_rpc_waiter.start( + self.__rpc_async_waiter, + async_rpc_caller, + self.__rpc_server_prog_pipe_fobj, + ) - async_rpc_waiter = pkg.misc.AsyncCall() - async_rpc_waiter.start( - self.__rpc_async_waiter, - async_rpc_caller, - self.__rpc_server_prog_pipe_fobj) + self.__set_state_started(async_rpc_caller, async_rpc_waiter) - self.__set_state_started(async_rpc_caller, async_rpc_waiter) + def is_done(self): + """Public interface to query if a remote packaging operation + is done.""" - def is_done(self): - """Public interface to query if a remote packaging operation - is done.""" + self.__debug_msg("is_done()") + assert self.__state in [self.__SETUP, self.__STARTED] - self.__debug_msg("is_done()") - assert self.__state in [self.__SETUP, self.__STARTED] + # drain the progress pipe. + self.__rpc_client_prog_pipe_drain() - # drain the progress pipe. - self.__rpc_client_prog_pipe_drain() + if self.__state == self.__SETUP: + rv = False + else: + # see if the client is done + rv = self.__async_rpc_caller.is_done() - if self.__state == self.__SETUP: - rv = False - else: - # see if the client is done - rv = self.__async_rpc_caller.is_done() + return rv - return rv + def result(self): + """Public interface to get the result of a remote packaging + operation. If the operation is not yet completed, this + interface will block until it finishes. The return value is a + tuple which contains: - def result(self): - """Public interface to get the result of a remote packaging - operation. If the operation is not yet completed, this - interface will block until it finishes. The return value is a - tuple which contains: + 'rv' is the return value of the RPC operation - 'rv' is the return value of the RPC operation + 'e' is any exception generated by the RPC operation - 'e' is any exception generated by the RPC operation + 'stdout' is the standard output generated by the RPC server + during the RPC operation. - 'stdout' is the standard output generated by the RPC server - during the RPC operation. 
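start() above hands the blocking RPC off to pkg.misc.AsyncCall so the caller never blocks; is_done() and result() then poll or collect the outcome. That helper is internal to pkg, so the sketch below is a simplified stand-in built on plain threading, only to show the start()/is_done()/result() shape used here (the real AsyncCall also wraps failures in AsyncCallException):

import threading

class SimpleAsyncCall:
    """Run a callable in its own thread; poll with is_done(), collect with result()."""

    def __init__(self):
        self._thread = None
        self._rv = None
        self._exc = None

    def start(self, cb, *args, **kwargs):
        def run():
            try:
                self._rv = cb(*args, **kwargs)
            except Exception as ex:
                self._exc = ex
        self._thread = threading.Thread(target=run)
        self._thread.start()

    def is_done(self):
        return not self._thread.is_alive()

    def result(self):
        # blocks until the call finishes, then returns or re-raises
        self._thread.join()
        if self._exc is not None:
            raise self._exc
        return self._rv

call = SimpleAsyncCall()
call.start(sum, range(10))
print(call.result())    # 45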
+ 'stderr' is the standard output generated by the RPC server + during the RPC operation.""" - 'stderr' is the standard output generated by the RPC server - during the RPC operation.""" + self.__debug_msg("result()") + self.__state_verify(self.__STARTED) - self.__debug_msg("result()") - self.__state_verify(self.__STARTED) + rvtuple = e = None + try: + rvtuple = self.__async_rpc_caller.result() + except pkg.misc.AsyncCallException as ex: + # due to python 3 scoping rules + e = ex - rvtuple = e = None - try: - rvtuple = self.__async_rpc_caller.result() - except pkg.misc.AsyncCallException as ex: - # due to python 3 scoping rules - e = ex + # assume we didn't get any results + rv = pkgdefs.EXIT_OOPS + stdout = stderr = "" - # assume we didn't get any results - rv = pkgdefs.EXIT_OOPS - stdout = stderr = "" + # unpack our results if we got any + if e is None: + # unpack our results. + # our results can contain an embedded exception. + # Attempting to unpack a non-sequence%s; + # pylint: disable=W0633 + rv, e, stdout, stderr = rvtuple - # unpack our results if we got any - if e is None: - # unpack our results. - # our results can contain an embedded exception. - # Attempting to unpack a non-sequence%s; - # pylint: disable=W0633 - rv, e, stdout, stderr = rvtuple + # make sure the return value is an int + if type(rv) != int: + rv = pkgdefs.EXIT_OOPS - # make sure the return value is an int - if type(rv) != int: - rv = pkgdefs.EXIT_OOPS + # if we got any errors, make sure we return OOPS + if e is not None: + rv = pkgdefs.EXIT_OOPS - # if we got any errors, make sure we return OOPS - if e is not None: - rv = pkgdefs.EXIT_OOPS + # shutdown the RPC server + self.__rpc_server_fini() - # shutdown the RPC server - self.__rpc_server_fini() + # pack up our results and enter the done state + self.__set_state_idle() - # pack up our results and enter the done state - self.__set_state_idle() + return (rv, e, stdout, stderr) - return (rv, e, stdout, stderr) + def abort(self): + """Public interface to abort an in-progress RPC operation.""" - def abort(self): - """Public interface to abort an in-progress RPC operation.""" + assert self.__state in [self.__SETUP, self.__STARTED] - assert self.__state in [self.__SETUP, self.__STARTED] + self.__debug_msg("call abort requested") - self.__debug_msg("call abort requested") + # shutdown the RPC server + self.__rpc_server_fini() - # shutdown the RPC server - self.__rpc_server_fini() + # enter the idle state + self.__set_state_idle() - # enter the idle state - self.__set_state_idle() # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/plandesc.py b/src/modules/client/plandesc.py index b2539a9c6..f5ebf8f78 100644 --- a/src/modules/client/plandesc.py +++ b/src/modules/client/plandesc.py @@ -55,1058 +55,1102 @@ import pkg.misc import pkg.version -from pkg.api_common import (PackageInfo, LicenseInfo) +from pkg.api_common import PackageInfo, LicenseInfo from pkg.client.pkgdefs import MSG_GENERAL -UNEVALUATED = 0 # nothing done yet -EVALUATED_PKGS = 1 # established fmri changes -MERGED_OK = 2 # created single merged plan -EVALUATED_OK = 3 # ready to execute -PREEXECUTED_OK = 4 # finished w/ preexecute -PREEXECUTED_ERROR = 5 # whoops -EXECUTED_OK = 6 # finished execution -EXECUTED_ERROR = 7 # failed +UNEVALUATED = 0 # nothing done yet +EVALUATED_PKGS = 1 # established fmri changes +MERGED_OK = 2 # created single merged plan +EVALUATED_OK = 3 # ready to execute +PREEXECUTED_OK = 4 # finished w/ preexecute +PREEXECUTED_ERROR = 5 
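result() above unpacks the (rv, e, stdout, stderr) tuple defensively: it assumes failure, only trusts the tuple when the async call itself succeeded, and forces EXIT_OOPS for non-integer returns or embedded errors. The same normalisation in isolation (the EXIT_* values are stand-ins for the pkg.client.pkgdefs constants):

EXIT_OK = 0      # stand-in for pkg.client.pkgdefs.EXIT_OK
EXIT_OOPS = 1    # stand-in for pkg.client.pkgdefs.EXIT_OOPS

def normalize_result(rvtuple, call_error=None):
    """Assume failure, unpack only if the async call itself succeeded,
    and force EXIT_OOPS for bad return types or embedded errors."""
    rv, e, stdout, stderr = EXIT_OOPS, call_error, "", ""
    if e is None:
        rv, e, stdout, stderr = rvtuple
    if not isinstance(rv, int):
        rv = EXIT_OOPS
    if e is not None:
        rv = EXIT_OOPS
    return (rv, e, stdout, stderr)

print(normalize_result((EXIT_OK, None, "done\n", "")))   # (0, None, 'done\n', '')
print(normalize_result(("bogus", None, "", "")))         # (1, None, '', '')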
# whoops +EXECUTED_OK = 6 # finished execution +EXECUTED_ERROR = 7 # failed + +OP_STAGE_PLAN = 0 +OP_STAGE_PREP = 1 +OP_STAGE_EXEC = 2 +OP_STAGE_PRINTED = 3 # The message has been consumed by a client -OP_STAGE_PLAN = 0 -OP_STAGE_PREP = 1 -OP_STAGE_EXEC = 2 -OP_STAGE_PRINTED = 3 # The message has been consumed by a client class _ActionPlan(collections.namedtuple("_ActionPlan", "p src dst")): - """A named tuple used to keep track of all the actions that will be - executed during an image-modifying procecure.""" - # Class has no __init__ method; pylint: disable=W0232 - # Use __slots__ on an old style class; pylint: disable=E1001 + """A named tuple used to keep track of all the actions that will be + executed during an image-modifying procecure.""" + + # Class has no __init__ method; pylint: disable=W0232 + # Use __slots__ on an old style class; pylint: disable=E1001 - __slots__ = [] + __slots__ = [] - __state__desc = tuple([ + __state__desc = tuple( + [ pkg.client.pkgplan.PkgPlan, pkg.actions.generic.NSG, pkg.actions.generic.NSG, - ]) + ] + ) - @staticmethod - def getstate(obj, je_state=None): - """Returns the serialized state of this object in a format - that that can be easily stored using JSON, pickle, etc.""" - return pkg.misc.json_encode(_ActionPlan.__name__, tuple(obj), - _ActionPlan.__state__desc, je_state=je_state) + @staticmethod + def getstate(obj, je_state=None): + """Returns the serialized state of this object in a format + that that can be easily stored using JSON, pickle, etc.""" + return pkg.misc.json_encode( + _ActionPlan.__name__, + tuple(obj), + _ActionPlan.__state__desc, + je_state=je_state, + ) - @staticmethod - def fromstate(state, jd_state=None): - """Allocate a new object using previously serialized state - obtained via getstate().""" - # Access to protected member; pylint: disable=W0212 + @staticmethod + def fromstate(state, jd_state=None): + """Allocate a new object using previously serialized state + obtained via getstate().""" + # Access to protected member; pylint: disable=W0212 - # get the name of the object we're dealing with - name = _ActionPlan.__name__ + # get the name of the object we're dealing with + name = _ActionPlan.__name__ - # decode serialized state into python objects - state = pkg.misc.json_decode(name, state, - _ActionPlan.__state__desc, jd_state=jd_state) + # decode serialized state into python objects + state = pkg.misc.json_decode( + name, state, _ActionPlan.__state__desc, jd_state=jd_state + ) - return _ActionPlan(*state) + return _ActionPlan(*state) class PlanDescription(object): - """A class which describes the changes the plan will make.""" - - __state__desc = { - "_actuators": pkg.client.actuator.Actuator, - "_cfg_mediators": { + """A class which describes the changes the plan will make.""" + + __state__desc = { + "_actuators": pkg.client.actuator.Actuator, + "_cfg_mediators": { + str: { + "version": pkg.version.Version, + "implementation-version": pkg.version.Version, + } + }, + "_fmri_changes": [(pkg.fmri.PkgFmri, pkg.fmri.PkgFmri)], + # avoid, implicit-avoid, obsolete + "_new_avoid_obs": (set(), set(), set()), + "_new_mediators": collections.defaultdict( + set, + { str: { "version": pkg.version.Version, "implementation-version": pkg.version.Version, } }, - "_fmri_changes": [ ( pkg.fmri.PkgFmri, pkg.fmri.PkgFmri ) ], - # avoid, implicit-avoid, obsolete - "_new_avoid_obs": ( set(), set(), set() ), - "_new_mediators": collections.defaultdict(set, { - str: { - "version": pkg.version.Version, - "implementation-version": pkg.version.Version, 
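_ActionPlan's getstate()/fromstate() pair above turns the named tuple into something JSON can store and back again; the pkg.misc.json_encode/json_decode helpers deal with the nested pkg types. A much-simplified standalone round trip using only the stdlib json module, with plain strings standing in for the PkgPlan and action objects:

import collections
import json

ActionPlan = collections.namedtuple("ActionPlan", "p src dst")

def getstate(plan):
    # a plain tuple serialises naturally as a JSON array
    return list(plan)

def fromstate(state):
    return ActionPlan(*state)

plan = ActionPlan("pkg://example/foo@1.0", None, "file path=etc/foo.conf")
blob = json.dumps(getstate(plan))
print(blob)                        # ["pkg://example/foo@1.0", null, "file path=etc/foo.conf"]
print(fromstate(json.loads(blob))) # ActionPlan(p='pkg://example/foo@1.0', src=None, dst='file path=etc/foo.conf')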
- } - }), - "_old_facets": pkg.facet.Facets, - "_new_facets": pkg.facet.Facets, - "_rm_aliases": { str: set() }, - "_preserved": { - "moved": [[str, str]], - "removed": [[str]], - "installed": [[str]], - "updated": [[str]], - }, - # Messaging looks like: - # {"item_id": {"sub_item_id": [], "messages": []}} - "_item_msgs": collections.defaultdict(dict), - "_pkg_actuators": { str: { str: [ str ] } }, - "added_groups": { str: pkg.fmri.PkgFmri }, - "added_users": { str: pkg.fmri.PkgFmri }, - "child_op_vectors": [ ( str, [ li.LinkedImageName ], {}, bool ) ], - "children_ignored": [ li.LinkedImageName ], - "children_nop": [ li.LinkedImageName ], - "children_planned": [ li.LinkedImageName ], - "install_actions": [ _ActionPlan ], - "elided_actions": [ _ActionPlan ], - "li_pfacets": pkg.facet.Facets, - "li_ppkgs": frozenset([ pkg.fmri.PkgFmri ]), - "li_props": { li.PROP_NAME: li.LinkedImageName }, - "pkg_plans": [ pkg.client.pkgplan.PkgPlan ], - "release_notes": (bool, []), - "removal_actions": [ _ActionPlan ], - "removed_groups": { str: pkg.fmri.PkgFmri }, - "removed_users": { str: pkg.fmri.PkgFmri }, - "update_actions": [ _ActionPlan ], - } - - __state__commonize = frozenset([ + ), + "_old_facets": pkg.facet.Facets, + "_new_facets": pkg.facet.Facets, + "_rm_aliases": {str: set()}, + "_preserved": { + "moved": [[str, str]], + "removed": [[str]], + "installed": [[str]], + "updated": [[str]], + }, + # Messaging looks like: + # {"item_id": {"sub_item_id": [], "messages": []}} + "_item_msgs": collections.defaultdict(dict), + "_pkg_actuators": {str: {str: [str]}}, + "added_groups": {str: pkg.fmri.PkgFmri}, + "added_users": {str: pkg.fmri.PkgFmri}, + "child_op_vectors": [(str, [li.LinkedImageName], {}, bool)], + "children_ignored": [li.LinkedImageName], + "children_nop": [li.LinkedImageName], + "children_planned": [li.LinkedImageName], + "install_actions": [_ActionPlan], + "elided_actions": [_ActionPlan], + "li_pfacets": pkg.facet.Facets, + "li_ppkgs": frozenset([pkg.fmri.PkgFmri]), + "li_props": {li.PROP_NAME: li.LinkedImageName}, + "pkg_plans": [pkg.client.pkgplan.PkgPlan], + "release_notes": (bool, []), + "removal_actions": [_ActionPlan], + "removed_groups": {str: pkg.fmri.PkgFmri}, + "removed_users": {str: pkg.fmri.PkgFmri}, + "update_actions": [_ActionPlan], + } + + __state__commonize = frozenset( + [ pkg.actions.generic.NSG, pkg.client.pkgplan.PkgPlan, pkg.fmri.PkgFmri, - ]) - - def __init__(self, op=None): - self.state = UNEVALUATED - self._op = op - - # - # Properties set when state >= EVALUATED_PKGS - # - self._image_lm = None - self._cfg_mediators = {} - self._varcets_change = False - self._new_variants = None - self._old_facets = None - self._new_facets = None - self._facet_change = False - self._masked_facet_change = False - self._new_mediators = collections.defaultdict(set) - self._mediators_change = False - self._new_avoid_obs = (set(), set(), set()) - self._fmri_changes = [] # install (None, fmri) - # remove (oldfmri, None) - # update (oldfmri, newfmri|oldfmri) - self._preserved = { - "moved": [], - "removed": [], - "installed": [], - "updated": [], - } - self._solver_summary = [] - self._solver_errors = None - self.li_attach = False - self.li_ppkgs = frozenset() - self.li_ppubs = None - self.li_props = {} - self._li_pkg_updates = True - self._item_msgs = collections.defaultdict(dict) - - # - # Properties set when state >= EVALUATED_OK - # - # raw actions - self.pkg_plans = [] - # merged actions - self.removal_actions = [] - self.update_actions = [] - self.install_actions = [] - 
self.elided_actions = [] - # smf and other actuators (driver actions get added during - # execution stage). - self._actuators = pkg.client.actuator.Actuator() - # Used to track users and groups that are part of operation. - self.added_groups = {} - self.added_users = {} - self.removed_groups = {} - self.removed_users = {} - # release notes that are part of this operation - self.release_notes = (False, []) - # plan properties - self._cbytes_added = 0 # size of compressed files - self._bytes_added = 0 # size of files added - self._need_boot_archive = None - # child properties - self.child_op_vectors = [] - self.children_ignored = None - self.children_planned = [] - self.children_nop = [] - # driver aliases to remove - self._rm_aliases = {} - - # - # Properties set when state >= EXECUTED_OK - # - self._salvaged = [] - self.release_notes_name = None - - # - # Set by imageplan.set_be_options() - # - self._backup_be = None - self._backup_be_name = None - self._new_be = None - self._be_name = None - self._be_activate = False - - # Accessed via imageplan.update_index - self._update_index = True - - # stats about the current image - self._cbytes_avail = 0 # avail space for downloads - self._bytes_avail = 0 # avail space for fs - - self._act_timed_out = False - - # Pkg actuators - self._pkg_actuators = {} - - @staticmethod - def getstate(obj, je_state=None, reset_volatiles=False): - """Returns the serialized state of this object in a format - that that can be easily stored using JSON, pickle, etc.""" - # Access to protected member; pylint: disable=W0212 - - if reset_volatiles: - # backup and clear volatiles - _bytes_avail = obj._bytes_avail - _cbytes_avail = obj._cbytes_avail - obj._bytes_avail = obj._cbytes_avail = 0 - - name = PlanDescription.__name__ - state = pkg.misc.json_encode(name, obj.__dict__, - PlanDescription.__state__desc, - commonize=PlanDescription.__state__commonize, - je_state=je_state) - - # add a state version encoding identifier - state[name] = 0 - - if reset_volatiles: - obj._bytes_avail = obj._bytes_avail - obj._cbytes_avail = obj._cbytes_avail - - return state - - @staticmethod - def setstate(obj, state, jd_state=None): - """Update the state of this object using previously serialized - state obtained via getstate().""" - # Access to protected member; pylint: disable=W0212 - - # get the name of the object we're dealing with - name = PlanDescription.__name__ - - # version check and delete the encoding identifier - assert state[name] == 0 - del state[name] - - # decode serialized state into python objects - state = pkg.misc.json_decode(name, state, - PlanDescription.__state__desc, - commonize=PlanDescription.__state__commonize, - jd_state=jd_state) - - # bulk update - obj.__dict__.update(state) - - # clear volatiles - obj._cbytes_avail = 0 - obj._bytes_avail = 0 - - @staticmethod - def fromstate(state, jd_state=None): - """Allocate a new object using previously serialized state - obtained via getstate().""" - rv = PlanDescription() - PlanDescription.setstate(rv, state, jd_state) - return rv - - def _save(self, fobj, reset_volatiles=False): - """Save a json encoded representation of this plan - description objects into the specified file object.""" - - state = PlanDescription.getstate(self, - reset_volatiles=reset_volatiles) - try: - fobj.truncate() - json.dump(state, fobj) - fobj.flush() - except OSError as e: - # Access to protected member; pylint: disable=W0212 - raise apx._convert_error(e) - - del state - - def _load(self, fobj): - """Load a json encoded representation of a 
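_save() and _load() above persist the whole plan state as one JSON document in an already-open file object, truncating before each write and seeking back to the start before each read. The same pattern in isolation, with a toy state dict and a tempfile (the real code also stamps and checks a state-version key):

import json
import tempfile

def save_state(fobj, state):
    fobj.seek(0)
    fobj.truncate()          # drop any previous contents
    json.dump(state, fobj)
    fobj.flush()

def load_state(fobj):
    fobj.seek(0)
    return json.load(fobj)

with tempfile.TemporaryFile(mode="w+") as fobj:
    save_state(fobj, {"PlanDescription": 0, "state": "UNEVALUATED"})
    print(load_state(fobj))  # {'PlanDescription': 0, 'state': 'UNEVALUATED'}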
plan description - from the specified file object.""" - - assert self.state == UNEVALUATED - - try: - fobj.seek(0) - state = json.load(fobj, object_hook=pkg.misc.json_hook) - except OSError as e: - # Access to protected member; pylint: disable=W0212 - raise apx._convert_error(e) - - PlanDescription.setstate(self, state) - del state - - def _executed_ok(self): - """A private interface used after a plan is successfully - invoked to free up memory.""" - - # reduce memory consumption - self._fmri_changes = [] - self._preserved = {} - # We have to save the timed_out state. - self._act_timed_out = self._actuators.timed_out - self._actuators = pkg.client.actuator.Actuator() - self.added_groups = {} - self.added_users = {} - self.removed_groups = {} - self.removed_users = {} - - @property - def executed(self): - """A boolean indicating if we attempted to execute this - plan.""" - return self.state in [EXECUTED_OK, EXECUTED_ERROR] - - @property - def services(self): - """Returns a list of string tuples describing affected services - (action, SMF FMRI).""" - return sorted( - ((str(a), str(smf_fmri)) - for a, smf_fmri in self._actuators.get_services_list()), - key=operator.itemgetter(0, 1) + ] + ) + + def __init__(self, op=None): + self.state = UNEVALUATED + self._op = op + + # + # Properties set when state >= EVALUATED_PKGS + # + self._image_lm = None + self._cfg_mediators = {} + self._varcets_change = False + self._new_variants = None + self._old_facets = None + self._new_facets = None + self._facet_change = False + self._masked_facet_change = False + self._new_mediators = collections.defaultdict(set) + self._mediators_change = False + self._new_avoid_obs = (set(), set(), set()) + self._fmri_changes = [] # install (None, fmri) + # remove (oldfmri, None) + # update (oldfmri, newfmri|oldfmri) + self._preserved = { + "moved": [], + "removed": [], + "installed": [], + "updated": [], + } + self._solver_summary = [] + self._solver_errors = None + self.li_attach = False + self.li_ppkgs = frozenset() + self.li_ppubs = None + self.li_props = {} + self._li_pkg_updates = True + self._item_msgs = collections.defaultdict(dict) + + # + # Properties set when state >= EVALUATED_OK + # + # raw actions + self.pkg_plans = [] + # merged actions + self.removal_actions = [] + self.update_actions = [] + self.install_actions = [] + self.elided_actions = [] + # smf and other actuators (driver actions get added during + # execution stage). + self._actuators = pkg.client.actuator.Actuator() + # Used to track users and groups that are part of operation. 
+ self.added_groups = {} + self.added_users = {} + self.removed_groups = {} + self.removed_users = {} + # release notes that are part of this operation + self.release_notes = (False, []) + # plan properties + self._cbytes_added = 0 # size of compressed files + self._bytes_added = 0 # size of files added + self._need_boot_archive = None + # child properties + self.child_op_vectors = [] + self.children_ignored = None + self.children_planned = [] + self.children_nop = [] + # driver aliases to remove + self._rm_aliases = {} + + # + # Properties set when state >= EXECUTED_OK + # + self._salvaged = [] + self.release_notes_name = None + + # + # Set by imageplan.set_be_options() + # + self._backup_be = None + self._backup_be_name = None + self._new_be = None + self._be_name = None + self._be_activate = False + + # Accessed via imageplan.update_index + self._update_index = True + + # stats about the current image + self._cbytes_avail = 0 # avail space for downloads + self._bytes_avail = 0 # avail space for fs + + self._act_timed_out = False + + # Pkg actuators + self._pkg_actuators = {} + + @staticmethod + def getstate(obj, je_state=None, reset_volatiles=False): + """Returns the serialized state of this object in a format + that that can be easily stored using JSON, pickle, etc.""" + # Access to protected member; pylint: disable=W0212 + + if reset_volatiles: + # backup and clear volatiles + _bytes_avail = obj._bytes_avail + _cbytes_avail = obj._cbytes_avail + obj._bytes_avail = obj._cbytes_avail = 0 + + name = PlanDescription.__name__ + state = pkg.misc.json_encode( + name, + obj.__dict__, + PlanDescription.__state__desc, + commonize=PlanDescription.__state__commonize, + je_state=je_state, + ) + + # add a state version encoding identifier + state[name] = 0 + + if reset_volatiles: + obj._bytes_avail = obj._bytes_avail + obj._cbytes_avail = obj._cbytes_avail + + return state + + @staticmethod + def setstate(obj, state, jd_state=None): + """Update the state of this object using previously serialized + state obtained via getstate().""" + # Access to protected member; pylint: disable=W0212 + + # get the name of the object we're dealing with + name = PlanDescription.__name__ + + # version check and delete the encoding identifier + assert state[name] == 0 + del state[name] + + # decode serialized state into python objects + state = pkg.misc.json_decode( + name, + state, + PlanDescription.__state__desc, + commonize=PlanDescription.__state__commonize, + jd_state=jd_state, + ) + + # bulk update + obj.__dict__.update(state) + + # clear volatiles + obj._cbytes_avail = 0 + obj._bytes_avail = 0 + + @staticmethod + def fromstate(state, jd_state=None): + """Allocate a new object using previously serialized state + obtained via getstate().""" + rv = PlanDescription() + PlanDescription.setstate(rv, state, jd_state) + return rv + + def _save(self, fobj, reset_volatiles=False): + """Save a json encoded representation of this plan + description objects into the specified file object.""" + + state = PlanDescription.getstate(self, reset_volatiles=reset_volatiles) + try: + fobj.truncate() + json.dump(state, fobj) + fobj.flush() + except OSError as e: + # Access to protected member; pylint: disable=W0212 + raise apx._convert_error(e) + + del state + + def _load(self, fobj): + """Load a json encoded representation of a plan description + from the specified file object.""" + + assert self.state == UNEVALUATED + + try: + fobj.seek(0) + state = json.load(fobj, object_hook=pkg.misc.json_hook) + except OSError as e: + # Access 
to protected member; pylint: disable=W0212 + raise apx._convert_error(e) + + PlanDescription.setstate(self, state) + del state + + def _executed_ok(self): + """A private interface used after a plan is successfully + invoked to free up memory.""" + + # reduce memory consumption + self._fmri_changes = [] + self._preserved = {} + # We have to save the timed_out state. + self._act_timed_out = self._actuators.timed_out + self._actuators = pkg.client.actuator.Actuator() + self.added_groups = {} + self.added_users = {} + self.removed_groups = {} + self.removed_users = {} + + @property + def executed(self): + """A boolean indicating if we attempted to execute this + plan.""" + return self.state in [EXECUTED_OK, EXECUTED_ERROR] + + @property + def services(self): + """Returns a list of string tuples describing affected services + (action, SMF FMRI).""" + return sorted( + ( + (str(a), str(smf_fmri)) + for a, smf_fmri in self._actuators.get_services_list() + ), + key=operator.itemgetter(0, 1), + ) + + @property + def mediators(self): + """Returns a list of three-tuples containing information about + the mediators. The first element in the tuple is the name of + the mediator. The second element is a tuple containing the + original version and source and the new version and source of + the mediator. The third element is a tuple containing the + original implementation and source and new implementation and + source.""" + + ret = [] + + if not self._mediators_change or ( + not self._cfg_mediators and not self._new_mediators + ): + return ret + + def get_mediation(mediators, m): + # Missing docstring; pylint: disable=C0111 + mimpl = mver = mimpl_source = mver_source = None + if m in mediators: + mimpl = mediators[m].get("implementation") + mimpl_ver = mediators[m].get("implementation-version") + if mimpl_ver: + mimpl_ver = mimpl_ver.get_short_version() + if mimpl and mimpl_ver: + mimpl += "(@{0})".format(mimpl_ver) + mimpl_source = mediators[m].get("implementation-source") + + mver = mediators[m].get("version") + if mver: + mver = mver.get_short_version() + mver_source = mediators[m].get("version-source") + return mimpl, mver, mimpl_source, mver_source + + for m in sorted(set(self._new_mediators) | set(self._cfg_mediators)): + ( + orig_impl, + orig_ver, + orig_impl_source, + orig_ver_source, + ) = get_mediation(self._cfg_mediators, m) + new_impl, new_ver, new_impl_source, new_ver_source = get_mediation( + self._new_mediators, m + ) + + if ( + orig_ver == new_ver + and orig_ver_source == new_ver_source + and orig_impl == new_impl + and orig_impl_source == new_impl_source + ): + # Mediation not changed. 
+ continue + + out = ( + m, + ((orig_ver, orig_ver_source), (new_ver, new_ver_source)), + ((orig_impl, orig_impl_source), (new_impl, new_impl_source)), + ) + + ret.append(out) + + return ret + + def find_removal(self, filename): + """Has the named file been tagged for removal ?""" + + for ap in self.removal_actions: + if ap.src.name == "file" and ap.src.attrs["path"] == filename: + return True + + return False + + def get_mediators(self): + """Returns list of strings describing mediator changes.""" + + ret = [] + for m, ver, impl in sorted(self.mediators): + ((orig_ver, orig_ver_source), (new_ver, new_ver_source)) = ver + ((orig_impl, orig_impl_source), (new_impl, new_impl_source)) = impl + out = "mediator {0}:\n".format(m) + if orig_ver and new_ver: + out += ( + " version: {0} ({1} default)" + " -> {2} ({3} default)\n".format( + orig_ver, orig_ver_source, new_ver, new_ver_source + ) + ) + elif orig_ver: + out += ( + " version: {0} ({1} default)" + " -> None\n".format(orig_ver, orig_ver_source) + ) + elif new_ver: + out += ( + " version: None -> " + "{0} ({1} default)\n".format(new_ver, new_ver_source) ) - @property - def mediators(self): - """Returns a list of three-tuples containing information about - the mediators. The first element in the tuple is the name of - the mediator. The second element is a tuple containing the - original version and source and the new version and source of - the mediator. The third element is a tuple containing the - original implementation and source and new implementation and - source.""" - - ret = [] - - if not self._mediators_change or \ - (not self._cfg_mediators and not self._new_mediators): - return ret - - def get_mediation(mediators, m): - # Missing docstring; pylint: disable=C0111 - mimpl = mver = mimpl_source = \ - mver_source = None - if m in mediators: - mimpl = mediators[m].get( - "implementation") - mimpl_ver = mediators[m].get( - "implementation-version") - if mimpl_ver: - mimpl_ver = \ - mimpl_ver.get_short_version() - if mimpl and mimpl_ver: - mimpl += "(@{0})".format(mimpl_ver) - mimpl_source = mediators[m].get( - "implementation-source") - - mver = mediators[m].get("version") - if mver: - mver = mver.get_short_version() - mver_source = mediators[m].get( - "version-source") - return mimpl, mver, mimpl_source, mver_source - - for m in sorted(set(self._new_mediators) | - set(self._cfg_mediators)): - orig_impl, orig_ver, orig_impl_source, \ - orig_ver_source = get_mediation( - self._cfg_mediators, m) - new_impl, new_ver, new_impl_source, new_ver_source = \ - get_mediation(self._new_mediators, m) - - if orig_ver == new_ver and \ - orig_ver_source == new_ver_source and \ - orig_impl == new_impl and \ - orig_impl_source == new_impl_source: - # Mediation not changed. 
- continue - - out = (m, - ((orig_ver, orig_ver_source), - (new_ver, new_ver_source)), - ((orig_impl, orig_impl_source), - (new_impl, new_impl_source))) - - ret.append(out) - - return ret - - def find_removal(self, filename): - """ Has the named file been tagged for removal ?""" - - for ap in self.removal_actions: - if ap.src.name == "file" and ap.src.attrs["path"] == filename: - return True - - return False - - def get_mediators(self): - """Returns list of strings describing mediator changes.""" - - ret = [] - for m, ver, impl in sorted(self.mediators): - ((orig_ver, orig_ver_source), - (new_ver, new_ver_source)) = ver - ((orig_impl, orig_impl_source), - (new_impl, new_impl_source)) = impl - out = "mediator {0}:\n".format(m) - if orig_ver and new_ver: - out += " version: {0} ({1} default)" \ - " -> {2} ({3} default)\n".format(orig_ver, - orig_ver_source, new_ver, new_ver_source) - elif orig_ver: - out += " version: {0} ({1} default)" \ - " -> None\n".format(orig_ver, - orig_ver_source) - elif new_ver: - out += " version: None -> " \ - "{0} ({1} default)\n".format(new_ver, - new_ver_source) - - if orig_impl and new_impl: - out += " implementation: {0} ({1} default)" \ - " -> {2} ({3} default)\n".format(orig_impl, - orig_impl_source, new_impl, new_impl_source) - elif orig_impl: - out += " implementation: {0} ({1} default)" \ - " -> None\n".format(orig_impl, - orig_impl_source) - elif new_impl: - out += " implementation: None -> " \ - "{0} ({1} default)\n".format(new_impl, - new_impl_source) - ret.append(out) - return ret - - @property - def plan_desc(self): - """Get the proposed fmri changes.""" - return self._fmri_changes - - @property - def salvaged(self): - """A list of tuples of items that were salvaged during plan - execution. Each tuple is of the form (original_path, - salvage_path). Where 'original_path' is the path of the item - before it was salvaged, and 'salvage_path' is where the item was - moved to. This property is only valid after plan execution - has completed.""" - return self._salvaged - - @property - def varcets(self): - """Returns a tuple of two lists containing the facet and - variant changes in this plan. 
- - The variant list contains tuples with the following format: - - (, ) - - The facet list contains tuples with the following format: - - (, , , , - , ) - - """ - - vs = [] - if self._new_variants: - vs = list(self._new_variants.items()) - - # sort results by variant name - vs.sort(key=lambda x: x[0]) - - fs = [] - if self._new_facets is None: - return (vs, fs) - - # create new dictionaries that index facets by name and - # source: - # dict[()] = (, ) - old_facets = dict([ - ((f, src), (v, masked)) - # not-an-iterable self._old_facets; - # pylint: disable=E1133 - for f in self._old_facets - # W0212 Access to a protected member - # pylint: disable=W0212 - for v, src, masked in self._old_facets._src_values(f) - ]) - new_facets = dict([ - ((f, src), (v, masked)) - # not-an-iterable self._new_facets; - # pylint: disable=E1133 - for f in self._new_facets - # W0212 Access to a protected member - # pylint: disable=W0212 - for v, src, masked in self._new_facets._src_values(f) - ]) - - # check for removed facets - for f, src in set(old_facets) - set(new_facets): - v, masked = old_facets[f, src] - fs.append((f, None, v, src, masked, False)) - - # check for added facets - for f, src in set(new_facets) - set(old_facets): - v, masked = new_facets[f, src] - fs.append((f, v, None, src, False, masked)) - - # check for changing facets - for f, src in set(old_facets) & set(new_facets): - if old_facets[f, src] == new_facets[f, src]: - continue - v_old, m_old = old_facets[f, src] - v_new, m_new = new_facets[f, src] - fs.append((f, v_new, v_old, src, m_old, m_new)) - - # sort results by facet name - fs.sort(key=lambda x: x[0]) - - return (vs, fs) - - def get_varcets(self): - """Returns a formatted list of strings representing the - variant/facet changes in this plan""" - vs, fs = self.varcets - rv = [ - "variant {0}: {1}".format(name[8:], val) - for (name, val) in vs - ] - masked_str = _(" (masked)") - for name, v_new, v_old, src, m_old, m_new in fs: - m_old = m_old and masked_str or "" - m_new = m_new and masked_str or "" - msg = " facet {0} ({1}): {2}{3} -> {4}{5}".format( - name[6:], src, v_old, m_old, v_new, m_new) - rv.append(msg) - return rv - - def get_changes(self): - """A generator function that yields tuples of PackageInfo - objects of the form (src_pi, dest_pi). - - If 'src_pi' is None, then 'dest_pi' is the package being - installed. - - If 'src_pi' is not None, and 'dest_pi' is None, 'src_pi' - is the package being removed. - - If 'src_pi' is not None, and 'dest_pi' is not None, - then 'src_pi' is the original version of the package, - and 'dest_pi' is the new version of the package it is - being upgraded to.""" - - key = operator.attrgetter("origin_fmri", "destination_fmri") - for pp in sorted(self.pkg_plans, key=key): - sfmri = pp.origin_fmri - dfmri = pp.destination_fmri - if sfmri == dfmri: - sinfo = dinfo = PackageInfo.build_from_fmri( - sfmri) - else: - sinfo = PackageInfo.build_from_fmri(sfmri) - dinfo = PackageInfo.build_from_fmri(dfmri) - yield (sinfo, dinfo) - - def get_editable_changes(self): - """This function returns a tuple of generators that yield tuples - of the form (src, dest) of the preserved ("editable") files that - will be installed, moved, removed, or updated. 
The returned - list of generators is (moved, removed, installed, updated).""" - - return ( - (entry for entry in self._preserved["moved"]), - ((entry[0], None) for entry in self._preserved["removed"]), - ((None, entry[0]) - for entry in self._preserved["installed"]), - ((entry[0], entry[0]) - for entry in self._preserved["updated"]), + if orig_impl and new_impl: + out += ( + " implementation: {0} ({1} default)" + " -> {2} ({3} default)\n".format( + orig_impl, orig_impl_source, new_impl, new_impl_source + ) + ) + elif orig_impl: + out += ( + " implementation: {0} ({1} default)" + " -> None\n".format(orig_impl, orig_impl_source) + ) + elif new_impl: + out += ( + " implementation: None -> " + "{0} ({1} default)\n".format(new_impl, new_impl_source) + ) + ret.append(out) + return ret + + @property + def plan_desc(self): + """Get the proposed fmri changes.""" + return self._fmri_changes + + @property + def salvaged(self): + """A list of tuples of items that were salvaged during plan + execution. Each tuple is of the form (original_path, + salvage_path). Where 'original_path' is the path of the item + before it was salvaged, and 'salvage_path' is where the item was + moved to. This property is only valid after plan execution + has completed.""" + return self._salvaged + + @property + def varcets(self): + """Returns a tuple of two lists containing the facet and + variant changes in this plan. + + The variant list contains tuples with the following format: + + (, ) + + The facet list contains tuples with the following format: + + (, , , , + , ) + + """ + + vs = [] + if self._new_variants: + vs = list(self._new_variants.items()) + + # sort results by variant name + vs.sort(key=lambda x: x[0]) + + fs = [] + if self._new_facets is None: + return (vs, fs) + + # create new dictionaries that index facets by name and + # source: + # dict[()] = (, ) + old_facets = dict( + [ + ((f, src), (v, masked)) + # not-an-iterable self._old_facets; + # pylint: disable=E1133 + for f in self._old_facets + # W0212 Access to a protected member + # pylint: disable=W0212 + for v, src, masked in self._old_facets._src_values(f) + ] + ) + new_facets = dict( + [ + ((f, src), (v, masked)) + # not-an-iterable self._new_facets; + # pylint: disable=E1133 + for f in self._new_facets + # W0212 Access to a protected member + # pylint: disable=W0212 + for v, src, masked in self._new_facets._src_values(f) + ] + ) + + # check for removed facets + for f, src in set(old_facets) - set(new_facets): + v, masked = old_facets[f, src] + fs.append((f, None, v, src, masked, False)) + + # check for added facets + for f, src in set(new_facets) - set(old_facets): + v, masked = new_facets[f, src] + fs.append((f, v, None, src, False, masked)) + + # check for changing facets + for f, src in set(old_facets) & set(new_facets): + if old_facets[f, src] == new_facets[f, src]: + continue + v_old, m_old = old_facets[f, src] + v_new, m_new = new_facets[f, src] + fs.append((f, v_new, v_old, src, m_old, m_new)) + + # sort results by facet name + fs.sort(key=lambda x: x[0]) + + return (vs, fs) + + def get_varcets(self): + """Returns a formatted list of strings representing the + variant/facet changes in this plan""" + vs, fs = self.varcets + rv = ["variant {0}: {1}".format(name[8:], val) for (name, val) in vs] + masked_str = _(" (masked)") + for name, v_new, v_old, src, m_old, m_new in fs: + m_old = m_old and masked_str or "" + m_new = m_new and masked_str or "" + msg = " facet {0} ({1}): {2}{3} -> {4}{5}".format( + name[6:], src, v_old, m_old, v_new, m_new + ) + 
rv.append(msg) + return rv + + def get_changes(self): + """A generator function that yields tuples of PackageInfo + objects of the form (src_pi, dest_pi). + + If 'src_pi' is None, then 'dest_pi' is the package being + installed. + + If 'src_pi' is not None, and 'dest_pi' is None, 'src_pi' + is the package being removed. + + If 'src_pi' is not None, and 'dest_pi' is not None, + then 'src_pi' is the original version of the package, + and 'dest_pi' is the new version of the package it is + being upgraded to.""" + + key = operator.attrgetter("origin_fmri", "destination_fmri") + for pp in sorted(self.pkg_plans, key=key): + sfmri = pp.origin_fmri + dfmri = pp.destination_fmri + if sfmri == dfmri: + sinfo = dinfo = PackageInfo.build_from_fmri(sfmri) + else: + sinfo = PackageInfo.build_from_fmri(sfmri) + dinfo = PackageInfo.build_from_fmri(dfmri) + yield (sinfo, dinfo) + + def get_editable_changes(self): + """This function returns a tuple of generators that yield tuples + of the form (src, dest) of the preserved ("editable") files that + will be installed, moved, removed, or updated. The returned + list of generators is (moved, removed, installed, updated).""" + + return ( + (entry for entry in self._preserved["moved"]), + ((entry[0], None) for entry in self._preserved["removed"]), + ((None, entry[0]) for entry in self._preserved["installed"]), + ((entry[0], entry[0]) for entry in self._preserved["updated"]), + ) + + def get_actions(self): + """A generator function that yields action change descriptions + in the order they will be performed.""" + + # Unused variable '%s'; pylint: disable=W0612 + for pplan, o_act, d_act in itertools.chain( + self.removal_actions, self.update_actions, self.install_actions + ): + # pylint: enable=W0612 + yield "{0} -> {1}".format(o_act, d_act) + + def get_elided_actions(self) -> Iterator[tuple[object, object]]: + for pplan, o_act, d_act in self.elided_actions: + yield (o_act, d_act) + + def has_release_notes(self): + """True if there are release notes for this plan""" + return bool(self.release_notes[1]) + + def must_display_notes(self): + """True if the release notes must be displayed""" + return self.release_notes[0] + + def get_release_notes(self): + """A generator that returns the release notes for this plan""" + for notes in self.release_notes[1]: + yield notes + + def get_licenses(self, pfmri=None): + """A generator function that yields information about the + licenses related to the current plan in tuples of the form + (dest_fmri, src, dest, accepted, displayed) for the given + package FMRI or all packages in the plan. This is only + available for licenses that are being installed or updated. + + 'dest_fmri' is the FMRI of the package being installed. + + 'src' is a LicenseInfo object if the license of the related + package is being updated; otherwise it is None. + + 'dest' is the LicenseInfo object for the license that is being + installed. + + 'accepted' is a boolean value indicating that the license has + been marked as accepted for the current plan. 
+ + 'displayed' is a boolean value indicating that the license has + been marked as displayed for the current plan.""" + + for pp in self.pkg_plans: + dfmri = pp.destination_fmri + if pfmri and dfmri != pfmri: + continue + + # Unused variable; pylint: disable=W0612 + for lid, entry in pp.get_licenses(): + src = entry["src"] + src_li = None + if src: + src_li = LicenseInfo(pp.origin_fmri, src, img=pp.image) + + dest = entry["dest"] + dest_li = None + if dest: + dest_li = LicenseInfo( + pp.destination_fmri, dest, img=pp.image + ) + + yield ( + pp.destination_fmri, + src_li, + dest_li, + entry["accepted"], + entry["displayed"], ) - def get_actions(self): - """A generator function that yields action change descriptions - in the order they will be performed.""" - - # Unused variable '%s'; pylint: disable=W0612 - for pplan, o_act, d_act in itertools.chain( - self.removal_actions, - self.update_actions, - self.install_actions): - # pylint: enable=W0612 - yield "{0} -> {1}".format(o_act, d_act) - - def get_elided_actions(self) -> Iterator[tuple[object, object]]: - for pplan, o_act, d_act in self.elided_actions: - yield (o_act, d_act) - - def has_release_notes(self): - """True if there are release notes for this plan""" - return bool(self.release_notes[1]) - - def must_display_notes(self): - """True if the release notes must be displayed""" - return self.release_notes[0] - - def get_release_notes(self): - """A generator that returns the release notes for this plan""" - for notes in self.release_notes[1]: - yield notes - - def get_licenses(self, pfmri=None): - """A generator function that yields information about the - licenses related to the current plan in tuples of the form - (dest_fmri, src, dest, accepted, displayed) for the given - package FMRI or all packages in the plan. This is only - available for licenses that are being installed or updated. - - 'dest_fmri' is the FMRI of the package being installed. - - 'src' is a LicenseInfo object if the license of the related - package is being updated; otherwise it is None. - - 'dest' is the LicenseInfo object for the license that is being - installed. - - 'accepted' is a boolean value indicating that the license has - been marked as accepted for the current plan. - - 'displayed' is a boolean value indicating that the license has - been marked as displayed for the current plan.""" - - for pp in self.pkg_plans: - dfmri = pp.destination_fmri - if pfmri and dfmri != pfmri: - continue - - # Unused variable; pylint: disable=W0612 - for lid, entry in pp.get_licenses(): - src = entry["src"] - src_li = None - if src: - src_li = LicenseInfo(pp.origin_fmri, - src, img=pp.image) - - dest = entry["dest"] - dest_li = None - if dest: - dest_li = LicenseInfo( - pp.destination_fmri, dest, - img=pp.image) - - yield (pp.destination_fmri, src_li, dest_li, - entry["accepted"], entry["displayed"]) - - if pfmri: - break - - def get_solver_errors(self): - """Returns a list of strings for all FMRIs evaluated by the - solver explaining why they were rejected. (All packages - found in solver's trim database.) Only available if - DebugValues["plan"] was set when the plan was created. 
- """ - - assert self.state >= EVALUATED_PKGS, \ - "{0} >= {1}".format(self.state, EVALUATED_PKGS) - - # in case this operation doesn't use solver - if self._solver_errors is None: - return [] - - return self._solver_errors - - def get_parsable_plan(self, parsable_version, child_images=None, - api_inst=None): - """Display the parsable version of the plan.""" - - assert parsable_version == 0, \ - "parsable_version was {0!r}".format(parsable_version) - # Set the default values. - added_fmris = [] - removed_fmris = [] - changed_fmris = [] - affected_fmris = [] - backup_be_created = False - new_be_created = False - backup_be_name = None - be_name = None - boot_archive_rebuilt = False - be_activated = True - space_available = None - space_required = None - facets_changed = [] - variants_changed = [] - services_affected = [] - mediators_changed = [] - editables_changed = [] - licenses = [] - - if child_images is None: - child_images = [] - release_notes = [] - if self: - for rem, add in self.get_changes(): - assert rem is not None or add is not None - if rem is not None and add is not None: - # Lists of lists are used here becuase - # json will convert lists of tuples - # into lists of lists anyway. - if rem.fmri == add.fmri: - affected_fmris.append(str(rem)) - else: - changed_fmris.append( - [str(rem), str(add)]) - elif rem is not None: - removed_fmris.append(str(rem)) - else: - added_fmris.append(str(add)) - variants_changed, facets_changed = self.varcets - backup_be_created = self.backup_be - new_be_created = self.new_be - backup_be_name = self.backup_be_name - be_name = self.be_name - boot_archive_rebuilt = self.update_boot_archive - be_activated = self.activate_be - space_available = self.bytes_avail - space_required = self.bytes_added - services_affected = self.services - mediators_changed = self.mediators - - emoved, eremoved, einstalled, eupdated = \ - self.get_editable_changes() - - # Lists of lists are used here to ensure a consistent - # ordering and because tuples will be converted to - # lists anyway; a dictionary would be more logical for - # the top level entries, but would make testing more - # difficult and this is a small, known set anyway. - emoved = [[e for e in entry] for entry in emoved] - eremoved = [src for (src, dest) in eremoved] - einstalled = [dest for (src, dest) in einstalled] - eupdated = [dest for (src, dest) in eupdated] - if emoved: - editables_changed.append(["moved", emoved]) - if eremoved: - editables_changed.append(["removed", eremoved]) - if einstalled: - editables_changed.append(["installed", - einstalled]) - if eupdated: - editables_changed.append(["updated", eupdated]) - - for n in self.get_release_notes(): - release_notes.append(n) - - for dfmri, src_li, dest_li, dummy_acc, dummy_disp in \ - self.get_licenses(): - src_tup = () - if src_li: - src_tup = (str(src_li.fmri), - src_li.license, src_li.get_text(), - src_li.must_accept, - src_li.must_display) - dest_tup = () - if dest_li: - dest_tup = (str(dest_li.fmri), - dest_li.license, dest_li.get_text(), - dest_li.must_accept, - dest_li.must_display) - licenses.append( - (str(dfmri), src_tup, dest_tup)) - - # If api_inst is set, mark licenses as - # displayed. - if api_inst: - api_inst.set_plan_license_status(dfmri, - dest_li.license, displayed=True) - - # The image name for the parent image is always None. If this - # image is a child image, then the image name will be set when - # the parent image processes this dictionary. 
- ret = { - "activate-be": be_activated, - "add-packages": sorted(added_fmris), - "affect-packages": sorted(affected_fmris), - "affect-services": sorted(services_affected), - "backup-be-name": backup_be_name, - "be-name": be_name, - "boot-archive-rebuild": boot_archive_rebuilt, - "change-facets": sorted(facets_changed), - "change-editables": editables_changed, - "change-mediators": sorted(mediators_changed), - "change-packages": sorted(changed_fmris), - "change-variants": sorted(variants_changed), - "child-images": child_images, - "create-backup-be": backup_be_created, - "create-new-be": new_be_created, - "image-name": None, - "item-messages": self.get_parsable_item_messages(), - "licenses": sorted(licenses, - key=lambda x: (x[0], x[1], x[2])), - "release-notes": release_notes, - "remove-packages": sorted(removed_fmris), - "space-available": space_available, - "space-required": space_required, - "version": parsable_version - } - return ret - - def get_parsable_item_messages(self): - """Return parsable item messages.""" - return self._item_msgs - - def add_item_message(self, item_id, msg_time, msg_level, msg_text, - msg_type=MSG_GENERAL, parent=None): - """Add a new message with its time, type and text for an - item.""" - if parent: - item_key = parent - sub_item = item_id - else: - item_key = item_id - sub_item = "messages" - if self.state >= PREEXECUTED_OK: - msg_stage = OP_STAGE_EXEC - elif self.state >= EVALUATED_OK: - msg_stage = OP_STAGE_PREP + if pfmri: + break + + def get_solver_errors(self): + """Returns a list of strings for all FMRIs evaluated by the + solver explaining why they were rejected. (All packages + found in solver's trim database.) Only available if + DebugValues["plan"] was set when the plan was created. + """ + + assert self.state >= EVALUATED_PKGS, "{0} >= {1}".format( + self.state, EVALUATED_PKGS + ) + + # in case this operation doesn't use solver + if self._solver_errors is None: + return [] + + return self._solver_errors + + def get_parsable_plan( + self, parsable_version, child_images=None, api_inst=None + ): + """Display the parsable version of the plan.""" + + assert parsable_version == 0, "parsable_version was {0!r}".format( + parsable_version + ) + # Set the default values. + added_fmris = [] + removed_fmris = [] + changed_fmris = [] + affected_fmris = [] + backup_be_created = False + new_be_created = False + backup_be_name = None + be_name = None + boot_archive_rebuilt = False + be_activated = True + space_available = None + space_required = None + facets_changed = [] + variants_changed = [] + services_affected = [] + mediators_changed = [] + editables_changed = [] + licenses = [] + + if child_images is None: + child_images = [] + release_notes = [] + if self: + for rem, add in self.get_changes(): + assert rem is not None or add is not None + if rem is not None and add is not None: + # Lists of lists are used here becuase + # json will convert lists of tuples + # into lists of lists anyway. + if rem.fmri == add.fmri: + affected_fmris.append(str(rem)) + else: + changed_fmris.append([str(rem), str(add)]) + elif rem is not None: + removed_fmris.append(str(rem)) else: - msg_stage = OP_STAGE_PLAN - # First level messaging looks like: - # {"item_id": {"messages": [msg_payload ...]}} - # Second level messaging looks like: - # {"item_id": {"sub_item_id": [msg_payload ...]}}. 
- msg_payload = {"msg_time": msg_time, - "msg_level": msg_level, - "msg_type": msg_type, - "msg_text": msg_text, - "msg_stage": msg_stage} - self._item_msgs[item_key].setdefault(sub_item, - []).append(msg_payload) - - def extend_item_messages(self, item_id, messages, parent=None): - """Add new messages to an item.""" - if parent: - item_key = parent - sub_item = item_id - else: - item_key = item_id - sub_item = "messages" - self._item_msgs[item_key].setdefault(sub_item, []).extend( - messages) - - @staticmethod - def __msg_dict2list(msg): - """Convert a message dictionary to a list.""" - return [msg["msg_time"], msg["msg_level"], msg["msg_type"], - msg["msg_text"]] - - def __gen_ordered_msg(self, stages): - """Generate ordered messages.""" - ordered_list = [] - for item_id in self._item_msgs: - # To make the first level messages come - # relatively earlier. - if "messages" in self._item_msgs[item_id]: - for msg in self._item_msgs[item_id]["messages"]: - if (stages is not None and - msg["msg_stage"] not in stages): - continue - ordered_list.append([item_id, None] + \ - PlanDescription. \ - __msg_dict2list(msg)) - msg["msg_stage"] = OP_STAGE_PRINTED - for si, si_list in six.iteritems( - self._item_msgs[item_id]): - if si == "messages": - continue - for msg in si_list: - if (stages is not None and - msg["msg_stage"] not in stages): - continue - ordered_list.append([si, item_id] + \ - PlanDescription. \ - __msg_dict2list(msg)) - msg["msg_stage"] = OP_STAGE_PRINTED - for entry in sorted(ordered_list, key=operator.itemgetter(2)): - yield entry - - def __gen_unordered_msg(self, stages): - """Generate unordered messages.""" - for item_id in self._item_msgs: - for si, si_list in six.iteritems( - self._item_msgs[item_id]): - if si == "messages": - iid = item_id - pid = None - else: - iid = si - pid = item_id - for mp in si_list: - if (stages is not None and - mp["msg_stage"] not in stages): - continue - mp["msg_stage"] = OP_STAGE_PRINTED - yield([iid, pid] + \ - PlanDescription.__msg_dict2list(mp)) - - def gen_item_messages(self, ordered=False, stages=None): - """Return all item messages. - - 'ordered' is an optional boolean value that indicates that - item messages will be sorted by msg_time. If False, item - messages will be in an arbitrary order. - - 'stages' is an optional list or set of the stages of messages - to return.""" - - if ordered: - return self.__gen_ordered_msg(stages) - else: - return self.__gen_unordered_msg(stages) - - def set_actuator_timeout(self, timeout): - """Set timeout for synchronous actuators.""" - assert type(timeout) == int, "Actuator timeout must be an "\ - "integer." - self._actuators.set_timeout(timeout) - - def add_pkg_actuator(self, trigger_pkg, exec_op, cpkg): - """Add a pkg actuator to the plan. The internal dictionary looks - like this: - { trigger_pkg: { - exec_op : [ changed pkg, ... ], - ... - }, - ... 
- } - """ - - if trigger_pkg in self._pkg_actuators: - if exec_op in self._pkg_actuators[trigger_pkg]: - self._pkg_actuators[trigger_pkg][ - exec_op].append(cpkg) - self._pkg_actuators[trigger_pkg][exec_op].sort() - else: - self._pkg_actuators[trigger_pkg][exec_op] = \ - [cpkg] + added_fmris.append(str(add)) + variants_changed, facets_changed = self.varcets + backup_be_created = self.backup_be + new_be_created = self.new_be + backup_be_name = self.backup_be_name + be_name = self.be_name + boot_archive_rebuilt = self.update_boot_archive + be_activated = self.activate_be + space_available = self.bytes_avail + space_required = self.bytes_added + services_affected = self.services + mediators_changed = self.mediators + + emoved, eremoved, einstalled, eupdated = self.get_editable_changes() + + # Lists of lists are used here to ensure a consistent + # ordering and because tuples will be converted to + # lists anyway; a dictionary would be more logical for + # the top level entries, but would make testing more + # difficult and this is a small, known set anyway. + emoved = [[e for e in entry] for entry in emoved] + eremoved = [src for (src, dest) in eremoved] + einstalled = [dest for (src, dest) in einstalled] + eupdated = [dest for (src, dest) in eupdated] + if emoved: + editables_changed.append(["moved", emoved]) + if eremoved: + editables_changed.append(["removed", eremoved]) + if einstalled: + editables_changed.append(["installed", einstalled]) + if eupdated: + editables_changed.append(["updated", eupdated]) + + for n in self.get_release_notes(): + release_notes.append(n) + + for ( + dfmri, + src_li, + dest_li, + dummy_acc, + dummy_disp, + ) in self.get_licenses(): + src_tup = () + if src_li: + src_tup = ( + str(src_li.fmri), + src_li.license, + src_li.get_text(), + src_li.must_accept, + src_li.must_display, + ) + dest_tup = () + if dest_li: + dest_tup = ( + str(dest_li.fmri), + dest_li.license, + dest_li.get_text(), + dest_li.must_accept, + dest_li.must_display, + ) + licenses.append((str(dfmri), src_tup, dest_tup)) + + # If api_inst is set, mark licenses as + # displayed. + if api_inst: + api_inst.set_plan_license_status( + dfmri, dest_li.license, displayed=True + ) + + # The image name for the parent image is always None. If this + # image is a child image, then the image name will be set when + # the parent image processes this dictionary. 
+ ret = { + "activate-be": be_activated, + "add-packages": sorted(added_fmris), + "affect-packages": sorted(affected_fmris), + "affect-services": sorted(services_affected), + "backup-be-name": backup_be_name, + "be-name": be_name, + "boot-archive-rebuild": boot_archive_rebuilt, + "change-facets": sorted(facets_changed), + "change-editables": editables_changed, + "change-mediators": sorted(mediators_changed), + "change-packages": sorted(changed_fmris), + "change-variants": sorted(variants_changed), + "child-images": child_images, + "create-backup-be": backup_be_created, + "create-new-be": new_be_created, + "image-name": None, + "item-messages": self.get_parsable_item_messages(), + "licenses": sorted(licenses, key=lambda x: (x[0], x[1], x[2])), + "release-notes": release_notes, + "remove-packages": sorted(removed_fmris), + "space-available": space_available, + "space-required": space_required, + "version": parsable_version, + } + return ret + + def get_parsable_item_messages(self): + """Return parsable item messages.""" + return self._item_msgs + + def add_item_message( + self, + item_id, + msg_time, + msg_level, + msg_text, + msg_type=MSG_GENERAL, + parent=None, + ): + """Add a new message with its time, type and text for an + item.""" + if parent: + item_key = parent + sub_item = item_id + else: + item_key = item_id + sub_item = "messages" + if self.state >= PREEXECUTED_OK: + msg_stage = OP_STAGE_EXEC + elif self.state >= EVALUATED_OK: + msg_stage = OP_STAGE_PREP + else: + msg_stage = OP_STAGE_PLAN + # First level messaging looks like: + # {"item_id": {"messages": [msg_payload ...]}} + # Second level messaging looks like: + # {"item_id": {"sub_item_id": [msg_payload ...]}}. + msg_payload = { + "msg_time": msg_time, + "msg_level": msg_level, + "msg_type": msg_type, + "msg_text": msg_text, + "msg_stage": msg_stage, + } + self._item_msgs[item_key].setdefault(sub_item, []).append(msg_payload) + + def extend_item_messages(self, item_id, messages, parent=None): + """Add new messages to an item.""" + if parent: + item_key = parent + sub_item = item_id + else: + item_key = item_id + sub_item = "messages" + self._item_msgs[item_key].setdefault(sub_item, []).extend(messages) + + @staticmethod + def __msg_dict2list(msg): + """Convert a message dictionary to a list.""" + return [ + msg["msg_time"], + msg["msg_level"], + msg["msg_type"], + msg["msg_text"], + ] + + def __gen_ordered_msg(self, stages): + """Generate ordered messages.""" + ordered_list = [] + for item_id in self._item_msgs: + # To make the first level messages come + # relatively earlier. 
+ if "messages" in self._item_msgs[item_id]: + for msg in self._item_msgs[item_id]["messages"]: + if stages is not None and msg["msg_stage"] not in stages: + continue + ordered_list.append( + [item_id, None] + PlanDescription.__msg_dict2list(msg) + ) + msg["msg_stage"] = OP_STAGE_PRINTED + for si, si_list in six.iteritems(self._item_msgs[item_id]): + if si == "messages": + continue + for msg in si_list: + if stages is not None and msg["msg_stage"] not in stages: + continue + ordered_list.append( + [si, item_id] + PlanDescription.__msg_dict2list(msg) + ) + msg["msg_stage"] = OP_STAGE_PRINTED + for entry in sorted(ordered_list, key=operator.itemgetter(2)): + yield entry + + def __gen_unordered_msg(self, stages): + """Generate unordered messages.""" + for item_id in self._item_msgs: + for si, si_list in six.iteritems(self._item_msgs[item_id]): + if si == "messages": + iid = item_id + pid = None else: - self._pkg_actuators[trigger_pkg] = {exec_op: [cpkg]} - - def gen_pkg_actuators(self): - """Pkg actuators which got triggered by operation.""" - for trigger_pkg in sorted(self._pkg_actuators): - yield (trigger_pkg, self._pkg_actuators[trigger_pkg]) - - @property - def actuator_timed_out(self): - """Indicates that a synchronous actuator timed out.""" - return self._act_timed_out - - @property - def plan_type(self): - """Return the type of plan that was created (ex: - API_OP_UPDATE).""" - return self._op - - @property - def update_index(self): - """Boolean indicating if indexes will be updated as part of an - image-modifying operation.""" - return self._update_index - - @property - def backup_be(self): - """Either None, True, or False. If None then executing this - plan may create a backup BE. If False, then executing this - plan will not create a backup BE. 
If True, then executing - this plan will create a backup BE.""" - return self._backup_be - - @property - def be_name(self): - """The name of a new BE that will be created if this plan is - executed.""" - return self._be_name - - @property - def backup_be_name(self): - """The name of a new backup BE that will be created if this - plan is executed.""" - return self._backup_be_name - - @property - def activate_be(self): - """A boolean value indicating whether any new boot environment - will be set active on next boot.""" - return self._be_activate - - @property - def reboot_needed(self): - """A boolean value indicating that execution of the plan will - require a restart of the system to take effect if the target - image is an existing boot environment.""" - return self._actuators.reboot_needed() - - @property - def new_be(self): - """A boolean value indicating that execution of the plan will - take place in a clone of the current live environment""" - return self._new_be - - @property - def update_boot_archive(self): - """A boolean value indicating whether or not the boot archive - will be rebuilt""" - return self._need_boot_archive - - @property - def bytes_added(self): - """Estimated number of bytes added""" - return self._bytes_added - - @property - def cbytes_added(self): - """Estimated number of download cache bytes added""" - return self._cbytes_added - - @property - def bytes_avail(self): - """Estimated number of bytes available in image /""" - return self._bytes_avail - - @property - def cbytes_avail(self): - """Estimated number of bytes available in download cache""" - return self._cbytes_avail - - @property - def new_facets(self): - """If facets are changing, this is the new set of facets being - applied.""" - if self._new_facets is None: - return None - return pkg.facet.Facets(self._new_facets) + iid = si + pid = item_id + for mp in si_list: + if stages is not None and mp["msg_stage"] not in stages: + continue + mp["msg_stage"] = OP_STAGE_PRINTED + yield ([iid, pid] + PlanDescription.__msg_dict2list(mp)) + + def gen_item_messages(self, ordered=False, stages=None): + """Return all item messages. + + 'ordered' is an optional boolean value that indicates that + item messages will be sorted by msg_time. If False, item + messages will be in an arbitrary order. + + 'stages' is an optional list or set of the stages of messages + to return.""" + + if ordered: + return self.__gen_ordered_msg(stages) + else: + return self.__gen_unordered_msg(stages) + + def set_actuator_timeout(self, timeout): + """Set timeout for synchronous actuators.""" + assert type(timeout) == int, "Actuator timeout must be an " "integer." + self._actuators.set_timeout(timeout) + + def add_pkg_actuator(self, trigger_pkg, exec_op, cpkg): + """Add a pkg actuator to the plan. The internal dictionary looks + like this: + { trigger_pkg: { + exec_op : [ changed pkg, ... ], + ... + }, + ... 
+ } + """ + + if trigger_pkg in self._pkg_actuators: + if exec_op in self._pkg_actuators[trigger_pkg]: + self._pkg_actuators[trigger_pkg][exec_op].append(cpkg) + self._pkg_actuators[trigger_pkg][exec_op].sort() + else: + self._pkg_actuators[trigger_pkg][exec_op] = [cpkg] + else: + self._pkg_actuators[trigger_pkg] = {exec_op: [cpkg]} + + def gen_pkg_actuators(self): + """Pkg actuators which got triggered by operation.""" + for trigger_pkg in sorted(self._pkg_actuators): + yield (trigger_pkg, self._pkg_actuators[trigger_pkg]) + + @property + def actuator_timed_out(self): + """Indicates that a synchronous actuator timed out.""" + return self._act_timed_out + + @property + def plan_type(self): + """Return the type of plan that was created (ex: + API_OP_UPDATE).""" + return self._op + + @property + def update_index(self): + """Boolean indicating if indexes will be updated as part of an + image-modifying operation.""" + return self._update_index + + @property + def backup_be(self): + """Either None, True, or False. If None then executing this + plan may create a backup BE. If False, then executing this + plan will not create a backup BE. If True, then executing + this plan will create a backup BE.""" + return self._backup_be + + @property + def be_name(self): + """The name of a new BE that will be created if this plan is + executed.""" + return self._be_name + + @property + def backup_be_name(self): + """The name of a new backup BE that will be created if this + plan is executed.""" + return self._backup_be_name + + @property + def activate_be(self): + """A boolean value indicating whether any new boot environment + will be set active on next boot.""" + return self._be_activate + + @property + def reboot_needed(self): + """A boolean value indicating that execution of the plan will + require a restart of the system to take effect if the target + image is an existing boot environment.""" + return self._actuators.reboot_needed() + + @property + def new_be(self): + """A boolean value indicating that execution of the plan will + take place in a clone of the current live environment""" + return self._new_be + + @property + def update_boot_archive(self): + """A boolean value indicating whether or not the boot archive + will be rebuilt""" + return self._need_boot_archive + + @property + def bytes_added(self): + """Estimated number of bytes added""" + return self._bytes_added + + @property + def cbytes_added(self): + """Estimated number of download cache bytes added""" + return self._cbytes_added + + @property + def bytes_avail(self): + """Estimated number of bytes available in image /""" + return self._bytes_avail + + @property + def cbytes_avail(self): + """Estimated number of bytes available in download cache""" + return self._cbytes_avail + + @property + def new_facets(self): + """If facets are changing, this is the new set of facets being + applied.""" + if self._new_facets is None: + return None + return pkg.facet.Facets(self._new_facets) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/printengine.py b/src/modules/client/printengine.py index 96c99b924..7693352f1 100644 --- a/src/modules/client/printengine.py +++ b/src/modules/client/printengine.py @@ -43,309 +43,313 @@ class PrintEngineException(Exception): - """Exception indicating the failure to create PrintEngine.""" - def __str__(self): - return "PrintEngineException: {0}".format(" ".join(self.args)) + """Exception indicating the failure to create PrintEngine.""" + + def __str__(self): 
+ return "PrintEngineException: {0}".format(" ".join(self.args)) + class PrintEngine(six.with_metaclass(ABCMeta, object)): - """Abstract class defining what a PrintEngine must know how to do.""" + """Abstract class defining what a PrintEngine must know how to do.""" - def __init__(self): - pass + def __init__(self): + pass - @abstractmethod - def isslow(self): - """Returns true if out_file is 'slow' (<=9600 baud).""" - pass + @abstractmethod + def isslow(self): + """Returns true if out_file is 'slow' (<=9600 baud).""" + pass - @abstractmethod - def cprint(self, *args, **kwargs): - """Core print routine. Must act basically like py3k's print - routine. For some printengines, additional behaviors can be - indicated via keyword args.""" - pass + @abstractmethod + def cprint(self, *args, **kwargs): + """Core print routine. Must act basically like py3k's print + routine. For some printengines, additional behaviors can be + indicated via keyword args.""" + pass - @abstractmethod - def flush(self): - """Make the terminal or line ready for output by another - subsystem. This commonly might entail issuing a newline.""" - pass + @abstractmethod + def flush(self): + """Make the terminal or line ready for output by another + subsystem. This commonly might entail issuing a newline.""" + pass class POSIXPrintEngine(PrintEngine): - """This is an engine for printing output to the end user which has been - tweaked for IPS's printing needs.""" - - def __init__(self, out_file, ttymode): - """Create a printengine. - - out_file -- the file object to print to - ttymode -- Boolean indicating need for tty support. Throws - PrintEngineException if out_file can't support. - """ - PrintEngine.__init__(self) - - self._out_file = out_file - self.__nchars_printed = 0 - self.__needs_nl = 0 - self.__cr = None - self.__ttymode = ttymode - - if not self.__ttymode: - return - - self.__putp_re = re.compile(r"\$<[0-9]+>") - self.__el = None - if not self._out_file.isatty(): - raise PrintEngineException("Not a TTY") - - try: - curses.setupterm(None, self._out_file.fileno()) - self.__cr = curses.tigetstr("cr") - self.__el = curses.tigetstr("el") - except curses.error: - raise PrintEngineException("Unknown terminal " - "'{0}'".format(os.environ.get("TERM", ""))) - - def putp(self, string): - """This routine loosely emulates python's curses.putp, but - works on whatever our output file is, instead just stdout""" - - assert self.__ttymode - - # Hardware terminals are pretty much gone now; we choose - # to drop delays specified in termcap (delays are in the - # form: $<[0-9]+>). - self._out_file.write(self.__putp_re.sub("", force_str(string))) - - def isslow(self): - """Returns true if out_file is 'slow' (<=9600 baud).""" - b = termios.B38400 # assume it's fast if we can't tell. - try: - b = termios.tcgetattr(self._out_file)[5] - except termios.error: - pass - return b <= termios.B9600 - - def erase(self): - """Send sequence to erase the current line to _out_file.""" - if self.__el: - self.putp(self.__cr) - self.putp(self.__el) - self.putp(self.__cr) - else: - # fallback mode if we have no el; overwrite with - # spaces. - self.putp(self.__cr) - self._out_file.write(self.__nchars_printed * ' ') - self.putp(self.__cr) - - def cprint(self, *args, **kwargs): - """Core print routine. 
Acts largely like py3k's print command, - (supports 'sep' and 'end' kwargs) with an extension: - - erase=true: Erase any content on the present line, intended for - use in overwriting.""" - - sep = kwargs.get("sep", ' ') - outmsg = sep.join(args) + kwargs.get("end", '\n') - - if kwargs.get("erase"): - assert self.__ttymode - self.erase() - # account for the erase setting the number of chars - # printed back to 0. - self.__nchars_printed = 0 - - # - # Setting __needs_nl is how _cprint works together with - # the flush entrypoint. If we're partially through - # writing a line (which we know by inspecting the - # line and the "end" value), then we know that if we - # get flush()'d by a consumer, we need to issue an - # additional newline. - # - if outmsg != "" and not outmsg.endswith("\n"): - self.__needs_nl = True - - # find the rightmost newline in the msg - npos = outmsg.rfind("\n") - if npos == -1: - self.__nchars_printed += len(outmsg) - else: - # there was an nl or cr, so only the portion - # after that counts. - self.__nchars_printed = len(outmsg) - (npos + 1) - - try: - self._out_file.write(outmsg) - self._out_file.flush() - # - # if indeed we printed a newline at the end, we know - # that an additional newline is definitely not needed on - # flush. - # - if outmsg.endswith("\n"): - self.__needs_nl = False - except IOError as e: - if e.errno == errno.EPIPE: - raise PipeError(e) - raise - - def flush(self): - """If we're in the middle of writing a line, this tries to - write a newline in order to allow clean output after flush().""" - try: - if self.__needs_nl: - self._out_file.write("\n") - self.__needs_nl = False - self._out_file.flush() - except IOError: - # we consider this to be harmless. - pass + """This is an engine for printing output to the end user which has been + tweaked for IPS's printing needs.""" + + def __init__(self, out_file, ttymode): + """Create a printengine. + + out_file -- the file object to print to + ttymode -- Boolean indicating need for tty support. Throws + PrintEngineException if out_file can't support. + """ + PrintEngine.__init__(self) + + self._out_file = out_file + self.__nchars_printed = 0 + self.__needs_nl = 0 + self.__cr = None + self.__ttymode = ttymode + + if not self.__ttymode: + return + + self.__putp_re = re.compile(r"\$<[0-9]+>") + self.__el = None + if not self._out_file.isatty(): + raise PrintEngineException("Not a TTY") + + try: + curses.setupterm(None, self._out_file.fileno()) + self.__cr = curses.tigetstr("cr") + self.__el = curses.tigetstr("el") + except curses.error: + raise PrintEngineException( + "Unknown terminal " "'{0}'".format(os.environ.get("TERM", "")) + ) + + def putp(self, string): + """This routine loosely emulates python's curses.putp, but + works on whatever our output file is, instead just stdout""" + + assert self.__ttymode + + # Hardware terminals are pretty much gone now; we choose + # to drop delays specified in termcap (delays are in the + # form: $<[0-9]+>). + self._out_file.write(self.__putp_re.sub("", force_str(string))) + + def isslow(self): + """Returns true if out_file is 'slow' (<=9600 baud).""" + b = termios.B38400 # assume it's fast if we can't tell. + try: + b = termios.tcgetattr(self._out_file)[5] + except termios.error: + pass + return b <= termios.B9600 + + def erase(self): + """Send sequence to erase the current line to _out_file.""" + if self.__el: + self.putp(self.__cr) + self.putp(self.__el) + self.putp(self.__cr) + else: + # fallback mode if we have no el; overwrite with + # spaces. 
+ self.putp(self.__cr) + self._out_file.write(self.__nchars_printed * " ") + self.putp(self.__cr) + + def cprint(self, *args, **kwargs): + """Core print routine. Acts largely like py3k's print command, + (supports 'sep' and 'end' kwargs) with an extension: + + erase=true: Erase any content on the present line, intended for + use in overwriting.""" + + sep = kwargs.get("sep", " ") + outmsg = sep.join(args) + kwargs.get("end", "\n") + + if kwargs.get("erase"): + assert self.__ttymode + self.erase() + # account for the erase setting the number of chars + # printed back to 0. + self.__nchars_printed = 0 + + # + # Setting __needs_nl is how _cprint works together with + # the flush entrypoint. If we're partially through + # writing a line (which we know by inspecting the + # line and the "end" value), then we know that if we + # get flush()'d by a consumer, we need to issue an + # additional newline. + # + if outmsg != "" and not outmsg.endswith("\n"): + self.__needs_nl = True + + # find the rightmost newline in the msg + npos = outmsg.rfind("\n") + if npos == -1: + self.__nchars_printed += len(outmsg) + else: + # there was an nl or cr, so only the portion + # after that counts. + self.__nchars_printed = len(outmsg) - (npos + 1) + + try: + self._out_file.write(outmsg) + self._out_file.flush() + # + # if indeed we printed a newline at the end, we know + # that an additional newline is definitely not needed on + # flush. + # + if outmsg.endswith("\n"): + self.__needs_nl = False + except IOError as e: + if e.errno == errno.EPIPE: + raise PipeError(e) + raise + + def flush(self): + """If we're in the middle of writing a line, this tries to + write a newline in order to allow clean output after flush().""" + try: + if self.__needs_nl: + self._out_file.write("\n") + self.__needs_nl = False + self._out_file.flush() + except IOError: + # we consider this to be harmless. + pass class LoggingPrintEngine(PrintEngine): - """This class adapts a printengine such that it issues its output to a - python logger from the logging module. Note that This class is used by - the AI (install) engine. - - The basic trick here is to use a StringIO in place of an actual file. - We then have the POSIX print engine issue its I/O to the StringIO, then - splitlines() the buffer and see if there are any complete lines that we - can output. If so, each complete line is issued to the logger, and any - remainder is put back into the StringIO for subsequent display.""" - - def __init__(self, logger, loglevel): - PrintEngine.__init__(self) - self._logger = logger - self._loglevel = loglevel - self._stringio = six.StringIO() - self._pxpe = POSIXPrintEngine(self._stringio, False) - - def isslow(self): - """Returns true if out_file is 'slow' (<=9600 baud).""" - return False - - def cprint(self, *args, **kwargs): - """Accumulates output into a buffer, emitting messages to - the _logger when full lines are available.""" - self._pxpe.cprint(*args, **kwargs) - - lines = self._stringio.getvalue().splitlines(True) - line = "" - for line in lines: - if line.endswith("\n"): - # write out, stripping the newline - self._logger.log(self._loglevel, line[:-1]) - self._stringio.seek(0) - self._stringio.truncate(0) - # anything left without a newline? Put it back. 
- if not line.endswith("\n"): - self._stringio.write(line) - - def flush(self): - """Log any partial line we've got left.""" - val = self._stringio.getvalue() - if val: - # should only ever have a partial line - assert not "\n" in val - self._logger.log(self._loglevel, val) - self._stringio.seek(0) - self._stringio.truncate(0) + """This class adapts a printengine such that it issues its output to a + python logger from the logging module. Note that This class is used by + the AI (install) engine. + + The basic trick here is to use a StringIO in place of an actual file. + We then have the POSIX print engine issue its I/O to the StringIO, then + splitlines() the buffer and see if there are any complete lines that we + can output. If so, each complete line is issued to the logger, and any + remainder is put back into the StringIO for subsequent display.""" + + def __init__(self, logger, loglevel): + PrintEngine.__init__(self) + self._logger = logger + self._loglevel = loglevel + self._stringio = six.StringIO() + self._pxpe = POSIXPrintEngine(self._stringio, False) + + def isslow(self): + """Returns true if out_file is 'slow' (<=9600 baud).""" + return False + + def cprint(self, *args, **kwargs): + """Accumulates output into a buffer, emitting messages to + the _logger when full lines are available.""" + self._pxpe.cprint(*args, **kwargs) + + lines = self._stringio.getvalue().splitlines(True) + line = "" + for line in lines: + if line.endswith("\n"): + # write out, stripping the newline + self._logger.log(self._loglevel, line[:-1]) + self._stringio.seek(0) + self._stringio.truncate(0) + # anything left without a newline? Put it back. + if not line.endswith("\n"): + self._stringio.write(line) + + def flush(self): + """Log any partial line we've got left.""" + val = self._stringio.getvalue() + if val: + # should only ever have a partial line + assert not "\n" in val + self._logger.log(self._loglevel, val) + self._stringio.seek(0) + self._stringio.truncate(0) def test_logging_printengine(output_file): - """Test driver for logging print engine. This is maintained as a - standalone function in order to support the 'runprintengine' test - utility in $SRC/tests/interactive/runprintengine.py. It is also - called by the test suite.""" - - logger = logging.getLogger('test') - ch = logging.StreamHandler(output_file) - logger.addHandler(ch) - - pe = LoggingPrintEngine(logger, logging.WARNING) - pe.cprint("Testing logging print engine. ", end='') - pe.cprint("Did you see this? ", end='') - pe.cprint("And this?") - pe.cprint("If the previous three sentences are on the same line, " - "it's working.") - pe.cprint("You need to see one more line after this one.") - pe.cprint("This should be the last line, printed by flushing", end='') - # just test that it works - pe.isslow() - pe.flush() + """Test driver for logging print engine. This is maintained as a + standalone function in order to support the 'runprintengine' test + utility in $SRC/tests/interactive/runprintengine.py. It is also + called by the test suite.""" + + logger = logging.getLogger("test") + ch = logging.StreamHandler(output_file) + logger.addHandler(ch) + + pe = LoggingPrintEngine(logger, logging.WARNING) + pe.cprint("Testing logging print engine. ", end="") + pe.cprint("Did you see this? ", end="") + pe.cprint("And this?") + pe.cprint( + "If the previous three sentences are on the same line, " "it's working." 
+ ) + pe.cprint("You need to see one more line after this one.") + pe.cprint("This should be the last line, printed by flushing", end="") + # just test that it works + pe.isslow() + pe.flush() def test_posix_printengine(output_file, ttymode): - """Test driver for POSIX print engine. This is maintained as a - standalone function in order to support the 'runprintengine' test - utility in $SRC/tests/interactive/runprintengine.py; it is also - called by the test suite.""" - - pe = POSIXPrintEngine(output_file, ttymode=ttymode) - - standout = "" - sgr0 = "" - if ttymode: - # We assume that setupterm() has been called already. - standout = curses.tigetstr("smso") or "" - sgr0 = curses.tigetstr("sgr0") or "" - - pe.cprint("Testing POSIX print engine; ttymode is {0}\n".format( - ttymode)) - - # If we're not in ttymode, then the testing is simple. - if not ttymode: - pe.cprint("testing 1 2 3") - pe.cprint("testing flush (2)") - pe.flush() - return - - # We assume setupterm() has been called. + """Test driver for POSIX print engine. This is maintained as a + standalone function in order to support the 'runprintengine' test + utility in $SRC/tests/interactive/runprintengine.py; it is also + called by the test suite.""" + + pe = POSIXPrintEngine(output_file, ttymode=ttymode) + + standout = "" + sgr0 = "" + if ttymode: + # We assume that setupterm() has been called already. standout = curses.tigetstr("smso") or "" sgr0 = curses.tigetstr("sgr0") or "" - pe.cprint("Now we'll print something and then erase it;") - pe.cprint("you should see a blank line below this line.") - pe.cprint("IF YOU CAN SEE THIS, THE TEST HAS FAILED", end='') - pe.cprint("", erase=True) - - pe.cprint("You should see an X swishing back and forth; from") - pe.cprint("left to right it should be inverse.") - # Unused variable 'y'; pylint: disable=W0612 - for y in range(0, 2): - for x in range(0, 30, 1): - pe.cprint(" " * x, erase=True, end='') - pe.putp(standout) - pe.cprint("X", end='') - pe.putp(sgr0) - time.sleep(0.050) - for x in range(30, -1, -1): - pe.cprint(" " * x + "X", erase=True, end='') - time.sleep(0.050) - pe.cprint("", erase=True) + + pe.cprint("Testing POSIX print engine; ttymode is {0}\n".format(ttymode)) + + # If we're not in ttymode, then the testing is simple. + if not ttymode: pe.cprint("testing 1 2 3") - pe.cprint("testing XX XX XX", end="") - time.sleep(0.500) - pe.cprint("testing 4 5 6\ntesting XX XX XX", erase=True, end="") - time.sleep(0.500) - pe.cprint("testing YY YY", end="", erase=True) - time.sleep(0.500) - pe.cprint("testing 7 8 9\ntesting 10 11 12", erase=True) - time.sleep(0.500) - pe.cprint("testing ZZ ZZ ZZ ZZ ZZ", end="") - time.sleep(0.500) - pe.cprint("testing 13 14 15", erase=True) - - pe.cprint("testing flush...", end='') - pe.flush() - pe.cprint("This should be on the next line.") pe.cprint("testing flush (2)") pe.flush() - pe.cprint("This should be on the next line (with no nl's intervening).") - # just test that it works - pe.isslow() + return + + # We assume setupterm() has been called. 
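
Both drivers keep the signatures used by $SRC/tests/interactive/runprintengine.py, so they can also be driven by hand. A rough sketch, assuming an interactive terminal; the repository's runprintengine.py remains the supported harness:

    import curses
    import sys

    from pkg.client import printengine

    curses.setupterm()
    printengine.test_posix_printengine(sys.stdout, True)    # tty rendering
    printengine.test_posix_printengine(sys.stdout, False)   # plain output
    printengine.test_logging_printengine(sys.stdout)        # logger adapter
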
+ standout = curses.tigetstr("smso") or "" + sgr0 = curses.tigetstr("sgr0") or "" + pe.cprint("Now we'll print something and then erase it;") + pe.cprint("you should see a blank line below this line.") + pe.cprint("IF YOU CAN SEE THIS, THE TEST HAS FAILED", end="") + pe.cprint("", erase=True) + + pe.cprint("You should see an X swishing back and forth; from") + pe.cprint("left to right it should be inverse.") + # Unused variable 'y'; pylint: disable=W0612 + for y in range(0, 2): + for x in range(0, 30, 1): + pe.cprint(" " * x, erase=True, end="") + pe.putp(standout) + pe.cprint("X", end="") + pe.putp(sgr0) + time.sleep(0.050) + for x in range(30, -1, -1): + pe.cprint(" " * x + "X", erase=True, end="") + time.sleep(0.050) + pe.cprint("", erase=True) + pe.cprint("testing 1 2 3") + pe.cprint("testing XX XX XX", end="") + time.sleep(0.500) + pe.cprint("testing 4 5 6\ntesting XX XX XX", erase=True, end="") + time.sleep(0.500) + pe.cprint("testing YY YY", end="", erase=True) + time.sleep(0.500) + pe.cprint("testing 7 8 9\ntesting 10 11 12", erase=True) + time.sleep(0.500) + pe.cprint("testing ZZ ZZ ZZ ZZ ZZ", end="") + time.sleep(0.500) + pe.cprint("testing 13 14 15", erase=True) + + pe.cprint("testing flush...", end="") + pe.flush() + pe.cprint("This should be on the next line.") + pe.cprint("testing flush (2)") + pe.flush() + pe.cprint("This should be on the next line (with no nl's intervening).") + # just test that it works + pe.isslow() + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/progress.py b/src/modules/client/progress.py index e9a036d59..e0f8684aa 100644 --- a/src/modules/client/progress.py +++ b/src/modules/client/progress.py @@ -53,480 +53,500 @@ from pkg.client import global_settings from pkg.client import printengine + logger = global_settings.logger + class ProgressTrackerException(Exception): - """Thrown if a ProgressTracker determines that it can't be instantiated. - For example, the tracker which depends on a UNIX style terminal should - throw this exception if it can't find a valid terminal.""" - def __str__(self): - return "ProgressTrackerException: {0}".format( - " ".join(self.args)) - - -def format_pair(format1, v1, v2, scale=None, targetwidth=None, - format2=None): - """Format a pair of numbers 'v1' and 'v2' representing a fraction, such - as v1=3 v2=200, such that for all anticipated values of v1 (0 through - v2) , the result is a fixed width pair separated by '/': - - format_pair("{0:d}", 3, 200) --> " 3/200" - - 'format1' is the preferred number format. In the event that targetwidth - is specified and the width of (format1.format(v2) > targetwidth), then - 'format2' is used instead: - - format_pair("{0:.1f}", 20.322, 1000.23, - targetwidth=5, format2="{0:d}") --> " 20/1000" - - This provides a mechanism for downshifting the accuracy of an output - to preserve column width. - - Inputs are scaled (divided by 'scale') if scale is specified.""" - - if scale: - v1 /= float(scale) - v2 /= float(scale) - realformat = format1 - v2format = realformat.format(v2) - v2len = len(v2format) - if format2 and targetwidth and v2len > targetwidth: - # see if format2 is shorter. - # convert type 'float' to 'int'. 
- if "d" in format2: - v2format = format2.format(int(v2)) - else: - v2format = format2.format(v2) - v2len1 = len(v2format) - if v2len1 < v2len: - realformat = format2 - v2len = v2len1 - - formatcolon = realformat.find(":") - v1format = realformat[0:formatcolon + 1] + str(v2len) + \ - realformat[formatcolon + 1:] - if "d" in v1format: - return (v1format.format(int(v1))) + "/" + v2format - else: - return (v1format.format(v1)) + "/" + v2format + """Thrown if a ProgressTracker determines that it can't be instantiated. + For example, the tracker which depends on a UNIX style terminal should + throw this exception if it can't find a valid terminal.""" + def __str__(self): + return "ProgressTrackerException: {0}".format(" ".join(self.args)) -class SpeedEstimator(object): - """This class implements a rudimentary download speed estimator. - newdata() is used to indicate download progress; curl calls - us back pretty frequently so that's not a terrible way to - go. Download progress records are kept on a deque, and are - expired after self.interval seconds have elapsed. Speed estimates - are smoothed so that things don't bounce around too fast. - - The class also implements some heuristics designed to prevent - handing out crappy estimates early in the download.""" - - # INTERVAL describes the interval, in seconds, which is used to - # compute the download speed. - INTERVAL = 10.0 - # This is the decay rate (or how much we mix in the historical - # notion of speed into the current notion). - SMOOTHING = 0.98 - - def __init__(self, goalbytes): - - # Ok to modify this during operation. - self.goalbytes = goalbytes - self.__deque = deque() - self.__intervalbytes = 0 - self.__curtotal = 0 - self.__last_smooth_speed = None - self.__instartup = True - self.__noestimate = True - self.__starttime = None - self.__donetime = None - - @staticmethod - def format_speed(speed): - if speed is None: - return None - # - # A little hack to keep things tidy: if the length of - # the speedstr > 5, we whack off the floating point - # portion. - # - speedstr = misc.bytes_to_str(speed, "{num:>.1f}{shortunit}") - if speed < 1024 or len(speedstr) > 5: - speedstr = misc.bytes_to_str(speed, - "{num:d}{shortunit}") - speedstr += "/s" - return speedstr +def format_pair(format1, v1, v2, scale=None, targetwidth=None, format2=None): + """Format a pair of numbers 'v1' and 'v2' representing a fraction, such + as v1=3 v2=200, such that for all anticipated values of v1 (0 through + v2) , the result is a fixed width pair separated by '/': - def newdata(self, nbytes, timestamp=None): - """Add new data as it becomes available; timestamp can be - overridden, although this is primarily designed for testing.""" + format_pair("{0:d}", 3, 200) --> " 3/200" - # must be started before adding data (sorry) - assert self.__starttime + 'format1' is the preferred number format. 
In the event that targetwidth + is specified and the width of (format1.format(v2) > targetwidth), then + 'format2' is used instead: - # - # Step 1: Insert latest datum - # - curtime = timestamp if timestamp else time.time() - self.__curtotal += nbytes - self.__deque.append((curtime, nbytes)) - self.__intervalbytes += nbytes + format_pair("{0:.1f}", 20.322, 1000.23, + targetwidth=5, format2="{0:d}") --> " 20/1000" - # - # Step 2: Expunge old data - # - while len(self.__deque) > 0: - (ts, val) = self.__deque[0] - if ts < curtime - self.INTERVAL: - self.__intervalbytes -= val - self.__deque.popleft() - else: - break + This provides a mechanism for downshifting the accuracy of an output + to preserve column width. - # - # Step 3: Recompute the estimate - # - # compute time delta between front and back of deque - timelapse = self.__deque[-1][0] - self.__deque[0][0] + Inputs are scaled (divided by 'scale') if scale is specified.""" - if len(self.__deque) <= 1 or timelapse == 0.0: - # can't operate yet - self.__noestimate = True - return + if scale: + v1 /= float(scale) + v2 /= float(scale) + realformat = format1 + v2format = realformat.format(v2) + v2len = len(v2format) + if format2 and targetwidth and v2len > targetwidth: + # see if format2 is shorter. + # convert type 'float' to 'int'. + if "d" in format2: + v2format = format2.format(int(v2)) + else: + v2format = format2.format(v2) + v2len1 = len(v2format) + if v2len1 < v2len: + realformat = format2 + v2len = v2len1 + + formatcolon = realformat.find(":") + v1format = ( + realformat[0 : formatcolon + 1] + + str(v2len) + + realformat[formatcolon + 1 :] + ) + if "d" in v1format: + return (v1format.format(int(v1))) + "/" + v2format + else: + return (v1format.format(v1)) + "/" + v2format - # - # 'ratiocomplete' is just the percentage done. It is - # used to disable 'startup mode' if the d/l completes - # very rapidly. We'll always start giving the user an - # estimate once ratiocomplete >= 50%. - # pylint is picky about this message: - # old-division; pylint: disable=W1619 - ratiocomplete = 0.0 if self.goalbytes == 0 else \ - self.__curtotal / self.goalbytes - # - # Keep track of whether we're in the warmup phase. This - # is used to deny estimates to callers until we feel good - # about them. This is very heuristic; it's a higher bar than - # we use below for disabling the estimate, basically because - # we want to open with a solid estimate. - # - if self.__instartup and len(self.__deque) > 50 and \ - timelapse > (self.INTERVAL / 5.0): - self.__instartup = False +class SpeedEstimator(object): + """This class implements a rudimentary download speed estimator. + newdata() is used to indicate download progress; curl calls + us back pretty frequently so that's not a terrible way to + go. Download progress records are kept on a deque, and are + expired after self.interval seconds have elapsed. Speed estimates + are smoothed so that things don't bounce around too fast. + + The class also implements some heuristics designed to prevent + handing out crappy estimates early in the download.""" + + # INTERVAL describes the interval, in seconds, which is used to + # compute the download speed. + INTERVAL = 10.0 + # This is the decay rate (or how much we mix in the historical + # notion of speed into the current notion). + SMOOTHING = 0.98 + + def __init__(self, goalbytes): + # Ok to modify this during operation. 
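
The behaviour of format_pair() is easiest to read off a few concrete calls. The first two mirror the examples in its docstring; the third shows the scale parameter. The outputs shown are what the code above produces:

    from pkg.client.progress import format_pair

    # v1 is padded out to the printed width of v2.
    format_pair("{0:d}", 3, 200)                        # "  3/200"

    # When format1 would exceed targetwidth, format2 takes over.
    format_pair("{0:.1f}", 20.322, 1000.23,
        targetwidth=5, format2="{0:d}")                 # "  20/1000"

    # Both values can be scaled first (e.g. bytes -> KiB).
    format_pair("{0:.1f}", 512, 2048, scale=1024)       # "0.5/2.0"
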
+ self.goalbytes = goalbytes + self.__deque = deque() + self.__intervalbytes = 0 + self.__curtotal = 0 + self.__last_smooth_speed = None + self.__instartup = True + self.__noestimate = True + self.__starttime = None + self.__donetime = None + + @staticmethod + def format_speed(speed): + if speed is None: + return None - # - # Even if we haven't accomplished the above requirements, - # exit startup mode when we're 1/3 done or more. - # - if self.__instartup and ratiocomplete > 0.33: - self.__instartup = False + # + # A little hack to keep things tidy: if the length of + # the speedstr > 5, we whack off the floating point + # portion. + # + speedstr = misc.bytes_to_str(speed, "{num:>.1f}{shortunit}") + if speed < 1024 or len(speedstr) > 5: + speedstr = misc.bytes_to_str(speed, "{num:d}{shortunit}") + speedstr += "/s" + return speedstr - # - # Take a look at the deque length as well as how much an - # interval's worth of data we have. If it is quite short, - # maybe the download has stalled out, or perhaps the user - # used ctrl-z and then resumed; disable the estimate until we - # build up more data. - # - self.__noestimate = bool(len(self.__deque) < 10 or - timelapse < (self.INTERVAL / 20.0)) - - curspeed = self.__intervalbytes / timelapse - - if self.__last_smooth_speed is None: - self.__last_smooth_speed = curspeed - else: - self.__last_smooth_speed = \ - int((self.SMOOTHING * self.__last_smooth_speed) + \ - ((1.0 - self.SMOOTHING) * curspeed)) - - def start(self, timestamp=None): - assert not self.__starttime - self.__starttime = timestamp if timestamp else time.time() - - def done(self, timestamp=None): - assert not self.__donetime - self.__donetime = timestamp if timestamp else time.time() - - def get_speed_estimate(self): - if self.__noestimate or self.__instartup or \ - not self.__last_smooth_speed: - return None - return int(self.__last_smooth_speed) - - def get_final_speed(self): - if self.__donetime is None: - return None - try: - # pylint is picky about this message: - # old-division; pylint: disable=W1619 - return self.goalbytes / self.elapsed() - except ZeroDivisionError: - return None - - def elapsed(self): - return None if self.__donetime is None else \ - self.__donetime - self.__starttime - - def __str__(self): - s = " 0: + (ts, val) = self.__deque[0] + if ts < curtime - self.INTERVAL: + self.__intervalbytes -= val + self.__deque.popleft() + else: + break - def reset(self): - self.__last_print_time = 0 + # + # Step 3: Recompute the estimate + # + # compute time delta between front and back of deque + timelapse = self.__deque[-1][0] - self.__deque[0][0] - def reset_now(self): - self.__last_print_time = time.time() + if len(self.__deque) <= 1 or timelapse == 0.0: + # can't operate yet + self.__noestimate = True + return # - # See if it has been more than __delay time since the last time we - # indicated that it was time to print. If this returns true, the - # caller should go ahead and print; this will not return true again - # until the 'delay' period has elapsed again. + # 'ratiocomplete' is just the percentage done. It is + # used to disable 'startup mode' if the d/l completes + # very rapidly. We'll always start giving the user an + # estimate once ratiocomplete >= 50%. 
+ # pylint is picky about this message: + # old-division; pylint: disable=W1619 + ratiocomplete = ( + 0.0 if self.goalbytes == 0 else self.__curtotal / self.goalbytes + ) + # - def time_to_print(self): - tt = time.time() - if (tt - self.__last_print_time) < self.__delay: - return False - self.__last_print_time = tt - self.print_value += 1 - return True + # Keep track of whether we're in the warmup phase. This + # is used to deny estimates to callers until we feel good + # about them. This is very heuristic; it's a higher bar than + # we use below for disabling the estimate, basically because + # we want to open with a solid estimate. + # + if ( + self.__instartup + and len(self.__deque) > 50 + and timelapse > (self.INTERVAL / 5.0) + ): + self.__instartup = False + + # + # Even if we haven't accomplished the above requirements, + # exit startup mode when we're 1/3 done or more. + # + if self.__instartup and ratiocomplete > 0.33: + self.__instartup = False + + # + # Take a look at the deque length as well as how much an + # interval's worth of data we have. If it is quite short, + # maybe the download has stalled out, or perhaps the user + # used ctrl-z and then resumed; disable the estimate until we + # build up more data. + # + self.__noestimate = bool( + len(self.__deque) < 10 or timelapse < (self.INTERVAL / 20.0) + ) + + curspeed = self.__intervalbytes / timelapse + + if self.__last_smooth_speed is None: + self.__last_smooth_speed = curspeed + else: + self.__last_smooth_speed = int( + (self.SMOOTHING * self.__last_smooth_speed) + + ((1.0 - self.SMOOTHING) * curspeed) + ) + + def start(self, timestamp=None): + assert not self.__starttime + self.__starttime = timestamp if timestamp else time.time() + + def done(self, timestamp=None): + assert not self.__donetime + self.__donetime = timestamp if timestamp else time.time() + + def get_speed_estimate(self): + if ( + self.__noestimate + or self.__instartup + or not self.__last_smooth_speed + ): + return None + return int(self.__last_smooth_speed) + + def get_final_speed(self): + if self.__donetime is None: + return None + try: + # pylint is picky about this message: + # old-division; pylint: disable=W1619 + return self.goalbytes / self.elapsed() + except ZeroDivisionError: + return None + + def elapsed(self): + return ( + None + if self.__donetime is None + else self.__donetime - self.__starttime + ) + + def __str__(self): + s = "".format(self.name, self.items, info) + def __setattr__(self, attrname, value): + # + # Start 'starttime' when 'items' is first set (even to zero) + # Note that starttime is initially set to -1 to avoid starting + # the timer during __init__(). + # + # Special behavior only for 'items' and only when not resetting + if attrname != "items" or self.starttime == -1: + self.__dict__[attrname] = value + return + + if self.starttime is None: + assert not getattr(self, "endtime", None), ( + "can't set items after explicit done(). 
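
SpeedEstimator is driven entirely through start()/newdata()/done(), and get_speed_estimate() deliberately returns None until the warmup heuristics are satisfied. A small illustrative sketch (the byte counts and sleep interval are made up; in pkg(7) newdata() is fed from transport callbacks):

    import time

    from pkg.client.progress import SpeedEstimator

    est = SpeedEstimator(goalbytes=100 * 64 * 1024)
    est.start()
    for _ in range(100):
        est.newdata(64 * 1024)            # one simulated chunk of download
        time.sleep(0.05)
        speed = est.get_speed_estimate()  # None while still warming up
        if speed is not None:
            print(SpeedEstimator.format_speed(speed))
    est.done()
    print("overall:", SpeedEstimator.format_speed(est.get_final_speed()))
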
" + "Tried to set {0}={1} (is {2})".format( + attrname, value, self.__dict__[attrname] + ) + ) + self.starttime = time.time() + self.__dict__[attrname] = value + + def start(self): + assert self.endtime is None + if not self.starttime: + self.starttime = time.time() + + def done(self): + self.endtime = time.time() + + def elapsed(self): + if not self.starttime: + return 0.0 + endtime = self.endtime + if endtime is None: + endtime = time.time() + return endtime - self.starttime + + def __str__(self): + info = "" + if self.curinfo: + info = " ({0})".format(str(self.curinfo)) + return "<{0}: {1}{2}>".format(self.name, self.items, info) class GoalTrackerItem(TrackerItem): - """This class extends TrackerItem to include the notion of progress - towards some goal which is known in advance of beginning the operation - (such as downloading 37 packages). In addition to the features of - TrackerItem, this class provides helpful routines for conversion to - printable strings of the form " 3/100".""" - - def __init__(self, name): - TrackerItem.__init__(self, name) - self.goalitems = None - - def reset(self): - # See comment in superclass. - self.__init__(self.name) - - # start 'starttime' when items gets set to non-zero - def __setattr__(self, attrname, value): - # Special behavior only for 'items' and only when not resetting - if attrname != "items" or self.starttime == -1: - self.__dict__[attrname] = value - return - - assert not getattr(self, "endtime", None), \ - "can't set values after explicit done(). " \ - "Tried to set {0}={1} (is {2})".format( - attrname, value, self.__dict__[attrname]) - - # see if this is the first time we're setting items - if self.starttime is None: - if self.goalitems is None: - raise RuntimeError( - "Cannot alter items until goalitems is set") - self.starttime = time.time() - self.__dict__[attrname] = value - - def done(self, goalcheck=True): - # Arguments number differs from overridden method; - # pylint: disable=W0221 - TrackerItem.done(self) - - # See if we indeed met our goal. - if goalcheck and not self.metgoal(): - exstr = _("Goal mismatch '{name}': " - "expected goal: {expected}, " - "current value: {current}").format( - name=self.name, - expected=self.goalitems, - current=self.items) - logger.error("\n" + exstr) - assert self.metgoal(), exstr - - def metgoal(self): - if self.items == 0 and self.goalitems is None: - return True - return self.items == self.goalitems - - def pair(self): - if self.goalitems is None: - assert self.items == 0 - return format_pair("{0:d}", 0, 0) - return format_pair("{0:d}", self.items, self.goalitems) - - def pairplus1(self): - # For use when you want to display something happening, - # such as: Downloading item 3/3, since typically items is - # not incremented until after the operation completes. - # - # To ensure that we don't print 4/3 in the last iteration of - # output, we also account for that case. - if self.goalitems is None: - assert self.items == 0 - return format_pair("{0:d}", 1, 1) - if self.items == self.goalitems: - items = self.items - else: - items = self.items + 1 - return format_pair("{0:d}", items, self.goalitems) - - def pctdone(self): - """Returns progress towards a goal as a percentage. - i.e. 
37 / 100 would yield 37.0""" - if self.goalitems is None or self.goalitems == 0: - return 0 - # pylint is picky about this message: - # old-division; pylint: disable=W1619 - return math.floor(100.0 * - self.items / self.goalitems) - - def __str__(self): - info = "" - if self.curinfo: - info = " ({0})".format(str(self.curinfo)) - return "<{0}: {1}{2}>".format(self.name, self.pair(), info) + """This class extends TrackerItem to include the notion of progress + towards some goal which is known in advance of beginning the operation + (such as downloading 37 packages). In addition to the features of + TrackerItem, this class provides helpful routines for conversion to + printable strings of the form " 3/100".""" + + def __init__(self, name): + TrackerItem.__init__(self, name) + self.goalitems = None + + def reset(self): + # See comment in superclass. + self.__init__(self.name) + + # start 'starttime' when items gets set to non-zero + def __setattr__(self, attrname, value): + # Special behavior only for 'items' and only when not resetting + if attrname != "items" or self.starttime == -1: + self.__dict__[attrname] = value + return + + assert not getattr(self, "endtime", None), ( + "can't set values after explicit done(). " + "Tried to set {0}={1} (is {2})".format( + attrname, value, self.__dict__[attrname] + ) + ) + + # see if this is the first time we're setting items + if self.starttime is None: + if self.goalitems is None: + raise RuntimeError("Cannot alter items until goalitems is set") + self.starttime = time.time() + self.__dict__[attrname] = value + + def done(self, goalcheck=True): + # Arguments number differs from overridden method; + # pylint: disable=W0221 + TrackerItem.done(self) + + # See if we indeed met our goal. + if goalcheck and not self.metgoal(): + exstr = _( + "Goal mismatch '{name}': " + "expected goal: {expected}, " + "current value: {current}" + ).format( + name=self.name, expected=self.goalitems, current=self.items + ) + logger.error("\n" + exstr) + assert self.metgoal(), exstr + + def metgoal(self): + if self.items == 0 and self.goalitems is None: + return True + return self.items == self.goalitems + + def pair(self): + if self.goalitems is None: + assert self.items == 0 + return format_pair("{0:d}", 0, 0) + return format_pair("{0:d}", self.items, self.goalitems) + + def pairplus1(self): + # For use when you want to display something happening, + # such as: Downloading item 3/3, since typically items is + # not incremented until after the operation completes. + # + # To ensure that we don't print 4/3 in the last iteration of + # output, we also account for that case. + if self.goalitems is None: + assert self.items == 0 + return format_pair("{0:d}", 1, 1) + if self.items == self.goalitems: + items = self.items + else: + items = self.items + 1 + return format_pair("{0:d}", items, self.goalitems) + + def pctdone(self): + """Returns progress towards a goal as a percentage. + i.e. 37 / 100 would yield 37.0""" + if self.goalitems is None or self.goalitems == 0: + return 0 + # pylint is picky about this message: + # old-division; pylint: disable=W1619 + return math.floor(100.0 * self.items / self.goalitems) + + def __str__(self): + info = "" + if self.curinfo: + info = " ({0})".format(str(self.curinfo)) + return "<{0}: {1}{2}>".format(self.name, self.pair(), info) + # # This implements a decorator which is used to mark methods in @@ -539,14 +559,16 @@ def __str__(self): # the required methods at __init__ time. 
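
The relationship between goalitems, items and the formatted output is worth seeing in isolation. A small sketch using GoalTrackerItem directly (the item name is illustrative; normally these objects are only manipulated through a ProgressTracker):

    from pkg.client.progress import GoalTrackerItem

    item = GoalTrackerItem("Download packages")
    item.goalitems = 100     # the goal must be set before items may change
    item.items += 37         # the first change also starts the elapsed timer

    print(item.pair())       # " 37/100"
    print(item.pairplus1())  # " 38/100" - the item currently being worked on
    print(item.pctdone())    # floor of 100 * 37 / 100
    item.items = 100
    item.done()              # verifies the goal was met, records the end time
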
# def pt_abstract(func): - # Unused argument 'args', 'kwargs'; pylint: disable=W0613 - @wraps(func) - def enforce_abstract(*args, **kwargs): - raise NotImplementedError("{0} is abstract in " - "superclass; you must implement it in your " - "subclass.".format(func.__name__)) + # Unused argument 'args', 'kwargs'; pylint: disable=W0613 + @wraps(func) + def enforce_abstract(*args, **kwargs): + raise NotImplementedError( + "{0} is abstract in " + "superclass; you must implement it in your " + "subclass.".format(func.__name__) + ) - return enforce_abstract + return enforce_abstract # @@ -556,2633 +578,2848 @@ def enforce_abstract(*args, **kwargs): # versus front-end APIs. # class ProgressTrackerBackend(object): - # allow def func(args): pass - # More than one statement on a line; pylint: disable=C0321 + # allow def func(args): pass + # More than one statement on a line; pylint: disable=C0321 - def __init__(self): pass + def __init__(self): + pass - # - # This set of methods should be regarded as abstract *and* protected. - # - @pt_abstract - def _output_flush(self): pass - - @pt_abstract - def _change_purpose(self, old_purpose, new_purpose): pass - - @pt_abstract - def _cache_cats_output(self, outspec): pass - - @pt_abstract - def _load_cat_cache_output(self, outspec): pass - - @pt_abstract - def _refresh_output_progress(self, outspec): pass - - @pt_abstract - def _plan_output(self, outspec, planitem): pass - - @pt_abstract - def _plan_output_all_done(self): pass - - @pt_abstract - def _mfst_fetch(self, outspec): pass - - @pt_abstract - def _mfst_commit(self, outspec): pass - - @pt_abstract - def _repo_ver_output(self, outspec, repository_scan=False): pass - - @pt_abstract - def _repo_ver_output_error(self, errors): pass - - @pt_abstract - def _repo_ver_output_done(self): pass - - def _repo_fix_output(self, outspec): pass - - @pt_abstract - def _repo_fix_output_error(self, errors): pass - - @pt_abstract - def _repo_fix_output_info(self, errors): pass - - @pt_abstract - def _repo_fix_output_done(self): pass - - @pt_abstract - def _archive_output(self, outspec): pass + # + # This set of methods should be regarded as abstract *and* protected. 
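
pt_abstract gives these base classes a lightweight form of abstract-method enforcement without using abc: the decorated body never runs, and any call that falls through to the superclass raises NotImplementedError naming the offending method. A reduced sketch of the same pattern, with shortened names:

    from functools import wraps

    def abstract(func):
        @wraps(func)
        def enforce_abstract(*args, **kwargs):
            raise NotImplementedError(
                "{0} is abstract in superclass; you must implement it "
                "in your subclass.".format(func.__name__))
        return enforce_abstract

    class Backend:
        @abstract
        def _output_flush(self):
            pass

    class Good(Backend):
        def _output_flush(self):
            print("flushed")

    Good()._output_flush()     # prints "flushed"
    Backend()._output_flush()  # raises NotImplementedError
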
+ # + @pt_abstract + def _output_flush(self): + pass - @pt_abstract - def _dl_output(self, outspec): pass + @pt_abstract + def _change_purpose(self, old_purpose, new_purpose): + pass - @pt_abstract - def _act_output(self, outspec, actionitem): pass + @pt_abstract + def _cache_cats_output(self, outspec): + pass - @pt_abstract - def _act_output_all_done(self): pass + @pt_abstract + def _load_cat_cache_output(self, outspec): + pass - @pt_abstract - def _job_output(self, outspec, jobitem): pass + @pt_abstract + def _refresh_output_progress(self, outspec): + pass - @pt_abstract - def _republish_output(self, outspec): pass + @pt_abstract + def _plan_output(self, outspec, planitem): + pass - @pt_abstract - def _lint_output(self, outspec): pass + @pt_abstract + def _plan_output_all_done(self): + pass - @pt_abstract - def _li_recurse_start_output(self): pass + @pt_abstract + def _mfst_fetch(self, outspec): + pass - @pt_abstract - def _li_recurse_end_output(self): pass + @pt_abstract + def _mfst_commit(self, outspec): + pass - @pt_abstract - def _li_recurse_output_output(self, lin, stdout, stderr): pass + @pt_abstract + def _repo_ver_output(self, outspec, repository_scan=False): + pass - @pt_abstract - def _li_recurse_status_output(self, done): pass - - @pt_abstract - def _li_recurse_progress_output(self, lin): pass - - @pt_abstract - def _reversion(self, pfmri, outspec): pass - -class ProgressTrackerFrontend(object): - """This essentially abstract class forms the interface that other - modules in the system use to record progress against various goals.""" - - # More than one statement on a line; pylint: disable=C0321 - - # Major phases of operation - PHASE_PREPLAN = 1 - PHASE_PLAN = 2 - PHASE_DOWNLOAD = 3 - PHASE_EXECUTE = 4 - PHASE_FINALIZE = 5 - # Extra phase used when we're doing some part of a subphase - # (such as rebuilding the search index) in a standalone operation. - PHASE_UTILITY = 6 - MAJOR_PHASE = [PHASE_PREPLAN, PHASE_PLAN, PHASE_DOWNLOAD, PHASE_EXECUTE, - PHASE_FINALIZE, PHASE_UTILITY] - - # Planning phases - PLAN_SOLVE_SETUP = 100 - PLAN_SOLVE_SOLVER = 101 - PLAN_FIND_MFST = 102 - PLAN_PKGPLAN = 103 - PLAN_ACTION_MERGE = 104 - PLAN_ACTION_CONFLICT = 105 - PLAN_ACTION_CONSOLIDATE = 106 - PLAN_ACTION_MEDIATION = 107 - PLAN_ACTION_FINALIZE = 108 - PLAN_MEDIATION_CHG = 109 # for set-mediator - PLAN_PKG_VERIFY = 110 - PLAN_PKG_FIX = 111 - - # Action phases - ACTION_REMOVE = 200 - ACTION_INSTALL = 201 - ACTION_UPDATE = 202 - - # Finalization/Job phases - JOB_STATE_DB = 300 - JOB_IMAGE_STATE = 301 - JOB_FAST_LOOKUP = 302 - JOB_PKG_CACHE = 303 - JOB_READ_SEARCH = 304 - JOB_UPDATE_SEARCH = 305 - JOB_REBUILD_SEARCH = 306 - # pkgrepo job items - JOB_REPO_DELSEARCH = 307 - JOB_REPO_UPDATE_CAT = 308 - JOB_REPO_ANALYZE_RM = 309 - JOB_REPO_ANALYZE_REPO = 310 - JOB_REPO_RM_MFST = 311 - JOB_REPO_RM_FILES = 312 - JOB_REPO_VERIFY_REPO = 313 - JOB_REPO_FIX_REPO = 314 - - # Operation purpose. This set of modes is used by callers to indicate - # to the progress tracker what's going on at a high level. This allows - # output to be customized by subclasses to meet the needs of a - # particular purpose. - - # - # The purpose of current operations is in the "normal" set of things, - # including install, uninstall, change-variant, and other operations - # in which we can print arbitrary status information with impunity - # - PURPOSE_NORMAL = 0 - - # - # The purpose of current operations is in the service of trying to - # output a listing (list, contents, etc.) to the end user. 
Subclasses - # will likely want to suppress various bits of status (for non-tty - # output) or erase it (for tty output). - # - PURPOSE_LISTING = 1 - - # - # The purpose of current operations is in the service of figuring out - # if the packaging system itself is up to date. - # - PURPOSE_PKG_UPDATE_CHK = 2 - - # - # Types of lint phases - # - LINT_PHASETYPE_SETUP = 0 - LINT_PHASETYPE_EXECUTE = 1 - - def __init__(self): - # needs to be here due to use of _() - self.phase_names = { - self.PHASE_PREPLAN: _("Startup"), - self.PHASE_PLAN: _("Planning"), - self.PHASE_DOWNLOAD: _("Download"), - self.PHASE_EXECUTE: _("Actions"), - self.PHASE_FINALIZE: _("Finalize"), - self.PHASE_UTILITY: "", - } - - # find the widest string in the list of phases so we can - # set column width properly. - self.phase_max_width = \ - max(len(x) for x in self.phase_names.values()) - - self.li_phase_names = { - self.PHASE_PREPLAN: _("Startup"), - self.PHASE_PLAN: _("Planning"), - self.PHASE_DOWNLOAD: _("Downloading"), - self.PHASE_FINALIZE: _("Executing"), - self.PHASE_UTILITY: _("Processing"), - } - - @pt_abstract - def set_purpose(self, purpose): pass - - @pt_abstract - def get_purpose(self): pass - - @pt_abstract - def reset_download(self): pass - - @pt_abstract - def reset(self): pass - - @pt_abstract - def set_major_phase(self, majorphase): pass - - @pt_abstract - def flush(self): pass - - @pt_abstract - def cache_catalogs_start(self): pass - - @pt_abstract - def cache_catalogs_done(self): pass - - @pt_abstract - def load_catalog_cache_start(self): pass - - @pt_abstract - def load_catalog_cache_done(self): pass - - # fetching catalogs - @pt_abstract - def refresh_start(self, pub_cnt, full_refresh, target_catalog=False): - pass + @pt_abstract + def _repo_ver_output_error(self, errors): + pass - @pt_abstract - def refresh_start_pub(self, pub): pass + @pt_abstract + def _repo_ver_output_done(self): + pass - @pt_abstract - def refresh_end_pub(self, pub): pass + def _repo_fix_output(self, outspec): + pass - @pt_abstract - def refresh_progress(self, pub, nbytes): pass + @pt_abstract + def _repo_fix_output_error(self, errors): + pass - @pt_abstract - def refresh_done(self): pass + @pt_abstract + def _repo_fix_output_info(self, errors): + pass - # planning an operation - @pt_abstract - def plan_all_start(self): pass + @pt_abstract + def _repo_fix_output_done(self): + pass - @pt_abstract - def plan_start(self, planid, goal=None): pass + @pt_abstract + def _archive_output(self, outspec): + pass - @pt_abstract - def plan_add_progress(self, planid, nitems=1): pass + @pt_abstract + def _dl_output(self, outspec): + pass - @pt_abstract - def plan_done(self, planid): pass + @pt_abstract + def _act_output(self, outspec, actionitem): + pass - @pt_abstract - def plan_all_done(self): pass + @pt_abstract + def _act_output_all_done(self): + pass - # getting manifests over the network - @pt_abstract - def manifest_fetch_start(self, goal_mfsts): pass + @pt_abstract + def _job_output(self, outspec, jobitem): + pass - @pt_abstract - def manifest_fetch_progress(self, completion): pass + @pt_abstract + def _republish_output(self, outspec): + pass - @pt_abstract - def manifest_commit(self): pass + @pt_abstract + def _lint_output(self, outspec): + pass - @pt_abstract - def manifest_fetch_done(self): pass + @pt_abstract + def _li_recurse_start_output(self): + pass - # verifying the content of a repository - @pt_abstract - def repo_verify_start(self, npkgs): pass + @pt_abstract + def _li_recurse_end_output(self): + pass - @pt_abstract - def 
repo_verify_start_pkg(self, pkgfmri, repository_scan=False): pass + @pt_abstract + def _li_recurse_output_output(self, lin, stdout, stderr): + pass - @pt_abstract - def repo_verify_add_progress(self, pkgfmri): pass + @pt_abstract + def _li_recurse_status_output(self, done): + pass - @pt_abstract - def repo_verify_yield_error(self, pkgfmri, errors): pass + @pt_abstract + def _li_recurse_progress_output(self, lin): + pass - @pt_abstract - def repo_verify_yield_warning(self, pkgfmri, warnings): pass + @pt_abstract + def _reversion(self, pfmri, outspec): + pass - @pt_abstract - def repo_verify_yield_info(self, pkgfmri, info): pass - @pt_abstract - def repo_verify_end_pkg(self, pkgfmri): pass - - @pt_abstract - def repo_verify_done(self): pass - - # fixing the content of a repository - @pt_abstract - def repo_fix_start(self, npkgs): pass - - @pt_abstract - def repo_fix_add_progress(self, pkgfmri): pass - - @pt_abstract - def repo_fix_yield_error(self, pkgfmri, errors): pass +class ProgressTrackerFrontend(object): + """This essentially abstract class forms the interface that other + modules in the system use to record progress against various goals.""" + + # More than one statement on a line; pylint: disable=C0321 + + # Major phases of operation + PHASE_PREPLAN = 1 + PHASE_PLAN = 2 + PHASE_DOWNLOAD = 3 + PHASE_EXECUTE = 4 + PHASE_FINALIZE = 5 + # Extra phase used when we're doing some part of a subphase + # (such as rebuilding the search index) in a standalone operation. + PHASE_UTILITY = 6 + MAJOR_PHASE = [ + PHASE_PREPLAN, + PHASE_PLAN, + PHASE_DOWNLOAD, + PHASE_EXECUTE, + PHASE_FINALIZE, + PHASE_UTILITY, + ] + + # Planning phases + PLAN_SOLVE_SETUP = 100 + PLAN_SOLVE_SOLVER = 101 + PLAN_FIND_MFST = 102 + PLAN_PKGPLAN = 103 + PLAN_ACTION_MERGE = 104 + PLAN_ACTION_CONFLICT = 105 + PLAN_ACTION_CONSOLIDATE = 106 + PLAN_ACTION_MEDIATION = 107 + PLAN_ACTION_FINALIZE = 108 + PLAN_MEDIATION_CHG = 109 # for set-mediator + PLAN_PKG_VERIFY = 110 + PLAN_PKG_FIX = 111 + + # Action phases + ACTION_REMOVE = 200 + ACTION_INSTALL = 201 + ACTION_UPDATE = 202 + + # Finalization/Job phases + JOB_STATE_DB = 300 + JOB_IMAGE_STATE = 301 + JOB_FAST_LOOKUP = 302 + JOB_PKG_CACHE = 303 + JOB_READ_SEARCH = 304 + JOB_UPDATE_SEARCH = 305 + JOB_REBUILD_SEARCH = 306 + # pkgrepo job items + JOB_REPO_DELSEARCH = 307 + JOB_REPO_UPDATE_CAT = 308 + JOB_REPO_ANALYZE_RM = 309 + JOB_REPO_ANALYZE_REPO = 310 + JOB_REPO_RM_MFST = 311 + JOB_REPO_RM_FILES = 312 + JOB_REPO_VERIFY_REPO = 313 + JOB_REPO_FIX_REPO = 314 + + # Operation purpose. This set of modes is used by callers to indicate + # to the progress tracker what's going on at a high level. This allows + # output to be customized by subclasses to meet the needs of a + # particular purpose. + + # + # The purpose of current operations is in the "normal" set of things, + # including install, uninstall, change-variant, and other operations + # in which we can print arbitrary status information with impunity + # + PURPOSE_NORMAL = 0 + + # + # The purpose of current operations is in the service of trying to + # output a listing (list, contents, etc.) to the end user. Subclasses + # will likely want to suppress various bits of status (for non-tty + # output) or erase it (for tty output). + # + PURPOSE_LISTING = 1 + + # + # The purpose of current operations is in the service of figuring out + # if the packaging system itself is up to date. 
+ # + PURPOSE_PKG_UPDATE_CHK = 2 + + # + # Types of lint phases + # + LINT_PHASETYPE_SETUP = 0 + LINT_PHASETYPE_EXECUTE = 1 + + def __init__(self): + # needs to be here due to use of _() + self.phase_names = { + self.PHASE_PREPLAN: _("Startup"), + self.PHASE_PLAN: _("Planning"), + self.PHASE_DOWNLOAD: _("Download"), + self.PHASE_EXECUTE: _("Actions"), + self.PHASE_FINALIZE: _("Finalize"), + self.PHASE_UTILITY: "", + } - @pt_abstract - def repo_fix_yield_info(self, pkgfmri, info): pass + # find the widest string in the list of phases so we can + # set column width properly. + self.phase_max_width = max(len(x) for x in self.phase_names.values()) - @pt_abstract - def repo_fix_done(self): pass + self.li_phase_names = { + self.PHASE_PREPLAN: _("Startup"), + self.PHASE_PLAN: _("Planning"), + self.PHASE_DOWNLOAD: _("Downloading"), + self.PHASE_FINALIZE: _("Executing"), + self.PHASE_UTILITY: _("Processing"), + } - # archiving to .p5p files - @pt_abstract - def archive_set_goal(self, arcname, nitems, nbytes): pass + @pt_abstract + def set_purpose(self, purpose): + pass - @pt_abstract - def archive_add_progress(self, nitems, nbytes): pass + @pt_abstract + def get_purpose(self): + pass - @pt_abstract - def archive_done(self): pass + @pt_abstract + def reset_download(self): + pass - # Called when bits arrive, either from on-disk cache or over-the-wire. - @pt_abstract - def download_set_goal(self, npkgs, nfiles, nbytes): pass + @pt_abstract + def reset(self): + pass - @pt_abstract - def download_start_pkg(self, pkgfmri): pass + @pt_abstract + def set_major_phase(self, majorphase): + pass - @pt_abstract - def download_end_pkg(self, pkgfmri): pass + @pt_abstract + def flush(self): + pass - @pt_abstract - def download_add_progress(self, nfiles, nbytes, cachehit=False): - """Call to provide news that the download has made progress.""" - pass + @pt_abstract + def cache_catalogs_start(self): + pass - @pt_abstract - def download_done(self, dryrun=False): - """Call when all downloading is finished.""" - pass + @pt_abstract + def cache_catalogs_done(self): + pass - @pt_abstract - def download_get_progress(self): pass + @pt_abstract + def load_catalog_cache_start(self): + pass + + @pt_abstract + def load_catalog_cache_done(self): + pass + + # fetching catalogs + @pt_abstract + def refresh_start(self, pub_cnt, full_refresh, target_catalog=False): + pass - # Running actions - @pt_abstract - def actions_set_goal(self, actionid, nactions): pass + @pt_abstract + def refresh_start_pub(self, pub): + pass - @pt_abstract - def actions_add_progress(self, actionid): pass + @pt_abstract + def refresh_end_pub(self, pub): + pass - @pt_abstract - def actions_done(self, actionid): pass + @pt_abstract + def refresh_progress(self, pub, nbytes): + pass + + @pt_abstract + def refresh_done(self): + pass + + # planning an operation + @pt_abstract + def plan_all_start(self): + pass - @pt_abstract - def actions_all_done(self): pass + @pt_abstract + def plan_start(self, planid, goal=None): + pass + + @pt_abstract + def plan_add_progress(self, planid, nitems=1): + pass + + @pt_abstract + def plan_done(self, planid): + pass + + @pt_abstract + def plan_all_done(self): + pass + + # getting manifests over the network + @pt_abstract + def manifest_fetch_start(self, goal_mfsts): + pass + + @pt_abstract + def manifest_fetch_progress(self, completion): + pass + + @pt_abstract + def manifest_commit(self): + pass + + @pt_abstract + def manifest_fetch_done(self): + pass + + # verifying the content of a repository + @pt_abstract + def 
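
The PURPOSE_* constants are plain class attributes; callers switch between them with set_purpose(), and the tracker only notifies its backend (_change_purpose) when the value actually changes. A hedged sketch of how a listing operation might bracket itself; the save/restore wrapper is illustrative rather than an API of this module, and 'tracker' is assumed to be any concrete ProgressTracker subclass:

    from pkg.client import progress

    def run_listing(tracker):
        # Suppress/erase status output while a listing is produced, then
        # restore whatever purpose was in effect before.
        previous = tracker.get_purpose()
        tracker.set_purpose(progress.ProgressTrackerFrontend.PURPOSE_LISTING)
        try:
            pass  # ... emit the listing ...
        finally:
            tracker.set_purpose(previous)
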
repo_verify_start(self, npkgs): + pass + + @pt_abstract + def repo_verify_start_pkg(self, pkgfmri, repository_scan=False): + pass + + @pt_abstract + def repo_verify_add_progress(self, pkgfmri): + pass + + @pt_abstract + def repo_verify_yield_error(self, pkgfmri, errors): + pass + + @pt_abstract + def repo_verify_yield_warning(self, pkgfmri, warnings): + pass + + @pt_abstract + def repo_verify_yield_info(self, pkgfmri, info): + pass + + @pt_abstract + def repo_verify_end_pkg(self, pkgfmri): + pass + + @pt_abstract + def repo_verify_done(self): + pass + + # fixing the content of a repository + @pt_abstract + def repo_fix_start(self, npkgs): + pass + + @pt_abstract + def repo_fix_add_progress(self, pkgfmri): + pass + + @pt_abstract + def repo_fix_yield_error(self, pkgfmri, errors): + pass + + @pt_abstract + def repo_fix_yield_info(self, pkgfmri, info): + pass + + @pt_abstract + def repo_fix_done(self): + pass + + # archiving to .p5p files + @pt_abstract + def archive_set_goal(self, arcname, nitems, nbytes): + pass + + @pt_abstract + def archive_add_progress(self, nitems, nbytes): + pass + + @pt_abstract + def archive_done(self): + pass + + # Called when bits arrive, either from on-disk cache or over-the-wire. + @pt_abstract + def download_set_goal(self, npkgs, nfiles, nbytes): + pass + + @pt_abstract + def download_start_pkg(self, pkgfmri): + pass + + @pt_abstract + def download_end_pkg(self, pkgfmri): + pass + + @pt_abstract + def download_add_progress(self, nfiles, nbytes, cachehit=False): + """Call to provide news that the download has made progress.""" + pass + + @pt_abstract + def download_done(self, dryrun=False): + """Call when all downloading is finished.""" + pass + + @pt_abstract + def download_get_progress(self): + pass + + # Running actions + @pt_abstract + def actions_set_goal(self, actionid, nactions): + pass + + @pt_abstract + def actions_add_progress(self, actionid): + pass + + @pt_abstract + def actions_done(self, actionid): + pass + + @pt_abstract + def actions_all_done(self): + pass + + @pt_abstract + def job_start(self, jobid, goal=None): + pass + + @pt_abstract + def job_add_progress(self, jobid, nitems=1): + pass + + @pt_abstract + def job_done(self, jobid): + pass + + @pt_abstract + def republish_set_goal(self, npkgs, ngetbytes, nsendbytes): + pass + + @pt_abstract + def republish_start_pkg(self, pkgfmri, getbytes=None, sendbytes=None): + pass + + @pt_abstract + def republish_end_pkg(self, pkgfmri): + pass + + @pt_abstract + def upload_add_progress(self, nbytes): + """Call to provide news that the upload has made progress.""" + pass + + @pt_abstract + def republish_done(self, dryrun=False): + """Call when all republishing is finished.""" + pass + + @pt_abstract + def lint_next_phase(self, goalitems, lint_phasetype): + """Call to indicate a new phase of lint progress.""" + pass + + @pt_abstract + def lint_add_progress(self): + pass + + @pt_abstract + def lint_done(self): + pass + + @pt_abstract + def set_linked_name(self, lin): + """Called once an image determines its linked image name.""" + pass + + @pt_abstract + def li_recurse_start(self, pkg_op, total): + """Call when we recurse into a child linked image.""" + pass + + @pt_abstract + def li_recurse_end(self): + """Call when we return from a child linked image.""" + pass + + @pt_abstract + def li_recurse_status(self, lin_running, done): + """Call to update the progress tracker with the list of + images being operated on.""" + pass + + @pt_abstract + def li_recurse_output(self, lin, stdout, stderr): + """Call to 
display output from linked image operations.""" + pass + + @pt_abstract + def li_recurse_progress(self, lin): + """Call to indicate that the named child made progress.""" + pass + + @pt_abstract + def reversion_start(self, goal_pkgs, goal_revs): + pass + + @pt_abstract + def reversion_add_progress(self, pfmri, pkgs=0, reversioned=0, adjusted=0): + pass + + @pt_abstract + def reversion_done(self): + pass - @pt_abstract - def job_start(self, jobid, goal=None): pass - @pt_abstract - def job_add_progress(self, jobid, nitems=1): pass - - @pt_abstract - def job_done(self, jobid): pass +class ProgressTracker(ProgressTrackerFrontend, ProgressTrackerBackend): + """This class is used by the client to render and track progress + towards the completion of various tasks, such as download, + installation, update, etc. + + The superclass is largely concerned with tracking the raw numbers, and + with calling various callback routines when events of interest occur. + The callback routines are defined in the ProgressTrackerBackend class, + below. + + Different subclasses provide the actual rendering to the user, with + differing levels of detail and prettiness. + + Note that as currently envisioned, this class is concerned with + tracking the progress of long-running operations: it is NOT a general + purpose output mechanism nor an error collector. + + Most subclasses of ProgressTracker need not override the methods of + this class. However, most subclasses will need need to mix in and + define ALL of the methods from the ProgressTrackerBackend class.""" + + DL_MODE_DOWNLOAD = 1 + DL_MODE_REPUBLISH = 2 + + def __init__(self): + ProgressTrackerBackend.__init__(self) + ProgressTrackerFrontend.__init__(self) + self.reset() + + def reset_download(self): + # Attribute defined outside __init__; pylint: disable=W0201 + self.dl_mode = None + self.dl_estimator = None + + self.dl_pkgs = GoalTrackerItem(_("Download packages")) + self.dl_files = GoalTrackerItem(_("Download files")) + self.dl_bytes = GoalTrackerItem(_("Download bytes")) + self._dl_items = [self.dl_pkgs, self.dl_files, self.dl_bytes] + + # republishing support; republishing also uses dl_bytes + self.repub_pkgs = GoalTrackerItem(_("Republished pkgs")) + self.repub_send_bytes = GoalTrackerItem(_("Republish sent bytes")) + + def reset(self): + # Attribute defined outside __init__; pylint: disable=W0201 + self.major_phase = self.PHASE_PREPLAN + self.purpose = self.PURPOSE_NORMAL + + self.pub_refresh = GoalTrackerItem(_("Refresh Publishers")) + # We don't know the goal in advance for this one + self.pub_refresh_bytes = TrackerItem(_("Refresh bytes")) + self.refresh_target_catalog = None + self.refresh_full_refresh = False + + self.mfst_fetch = GoalTrackerItem(_("Download Manifests")) + self.mfst_commit = GoalTrackerItem(_("Committed Manifests")) + + self.repo_ver_pkgs = GoalTrackerItem(_("Verify Repository Content")) + self.repo_fix = GoalTrackerItem(_("Fix Repository Content")) + + # archiving support + self.archive_items = GoalTrackerItem(_("Archived items")) + self.archive_bytes = GoalTrackerItem(_("Archived bytes")) + + # reversioning support + self.reversion_pkgs = GoalTrackerItem(_("Processed Packages")) + self.reversion_revs = GoalTrackerItem(_("Reversioned Packages")) + self.reversion_adjs = GoalTrackerItem(_("Adjusted Packages")) + + # Used to measure elapsed time of entire planning; not otherwise + # rendered to the user. 
+ self.plan_generic = TrackerItem("") + + self._planitems = { + self.PLAN_SOLVE_SETUP: TrackerItem(_("Solver setup")), + self.PLAN_SOLVE_SOLVER: TrackerItem(_("Running solver")), + self.PLAN_FIND_MFST: TrackerItem(_("Finding local manifests")), + self.PLAN_PKGPLAN: GoalTrackerItem(_("Package planning")), + self.PLAN_ACTION_MERGE: TrackerItem(_("Merging actions")), + self.PLAN_ACTION_CONFLICT: TrackerItem( + _("Checking for conflicting actions") + ), + self.PLAN_ACTION_CONSOLIDATE: TrackerItem( + _("Consolidating action changes") + ), + self.PLAN_ACTION_MEDIATION: TrackerItem(_("Evaluating mediators")), + self.PLAN_ACTION_FINALIZE: TrackerItem(_("Finalizing action plan")), + self.PLAN_MEDIATION_CHG: TrackerItem( + _("Evaluating mediator changes") + ), + self.PLAN_PKG_VERIFY: GoalTrackerItem(_("Verifying Packages")), + self.PLAN_PKG_FIX: GoalTrackerItem(_("Fixing Packages")), + } - @pt_abstract - def republish_set_goal(self, npkgs, ngetbytes, nsendbytes): pass + self._actionitems = { + self.ACTION_REMOVE: GoalTrackerItem(_("Removing old actions")), + self.ACTION_INSTALL: GoalTrackerItem(_("Installing new actions")), + self.ACTION_UPDATE: GoalTrackerItem(_("Updating modified actions")), + } - @pt_abstract - def republish_start_pkg(self, pkgfmri, getbytes=None, sendbytes=None): - pass + self._jobitems = { + self.JOB_STATE_DB: TrackerItem( + _("Updating package state database") + ), + self.JOB_IMAGE_STATE: TrackerItem(_("Updating image state")), + self.JOB_FAST_LOOKUP: TrackerItem( + _("Creating fast lookup database") + ), + self.JOB_PKG_CACHE: GoalTrackerItem(_("Updating package cache")), + self.JOB_READ_SEARCH: TrackerItem(_("Reading search index")), + self.JOB_UPDATE_SEARCH: GoalTrackerItem(_("Updating search index")), + self.JOB_REBUILD_SEARCH: GoalTrackerItem( + _("Building new search index") + ), + # pkgrepo job items + self.JOB_REPO_DELSEARCH: TrackerItem(_("Deleting search index")), + self.JOB_REPO_UPDATE_CAT: TrackerItem(_("Updating catalog")), + self.JOB_REPO_ANALYZE_RM: GoalTrackerItem( + _("Analyzing removed packages") + ), + self.JOB_REPO_ANALYZE_REPO: GoalTrackerItem( + _("Analyzing repository packages") + ), + self.JOB_REPO_RM_MFST: GoalTrackerItem( + _("Removing package manifests") + ), + self.JOB_REPO_RM_FILES: GoalTrackerItem( + _("Removing package files") + ), + self.JOB_REPO_VERIFY_REPO: GoalTrackerItem( + _("Verifying repository content") + ), + self.JOB_REPO_FIX_REPO: GoalTrackerItem( + _("Fixing repository content") + ), + } - @pt_abstract - def republish_end_pkg(self, pkgfmri): pass + self.reset_download() - @pt_abstract - def upload_add_progress(self, nbytes): - """Call to provide news that the upload has made progress.""" - pass + self._archive_name = None - @pt_abstract - def republish_done(self, dryrun=False): - """Call when all republishing is finished.""" - pass + # Lint's interaction with the progresstracker probably + # needs further work. + self.lint_phase = None + self.lint_phasetype = None + # This GoalTrackerItem is created on the fly. 
+ self.lintitems = None - @pt_abstract - def lint_next_phase(self, goalitems, lint_phasetype): - """Call to indicate a new phase of lint progress.""" - pass + # Linked images + self.linked_name = None + self.linked_running = [] + self.linked_pkg_op = None + self.linked_total = 0 - @pt_abstract - def lint_add_progress(self): pass + def set_major_phase(self, majorphase): + # Attribute defined outside __init__; pylint: disable=W0201 + self.major_phase = majorphase - @pt_abstract - def lint_done(self): pass + def flush(self): + """Used to signal to the progresstracker that it should make + the output ready for use by another subsystem. In a + terminal-based environment, this would make sure that no + partially printed lines were present, and flush e.g. stdout.""" + self._output_flush() - @pt_abstract - def set_linked_name(self, lin): - """Called once an image determines its linked image name.""" - pass + def set_purpose(self, purpose): + op = self.purpose + self.purpose = purpose # pylint: disable=W0201 + if op != self.purpose: + self._change_purpose(op, purpose) - @pt_abstract - def li_recurse_start(self, pkg_op, total): - """Call when we recurse into a child linked image.""" - pass + def get_purpose(self): + return self.purpose - @pt_abstract - def li_recurse_end(self): - """Call when we return from a child linked image.""" - pass + def cache_catalogs_start(self): + self._cache_cats_output(OutSpec(first=True)) - @pt_abstract - def li_recurse_status(self, lin_running, done): - """Call to update the progress tracker with the list of - images being operated on.""" - pass + def cache_catalogs_done(self): + self._cache_cats_output(OutSpec(last=True)) - @pt_abstract - def li_recurse_output(self, lin, stdout, stderr): - """Call to display output from linked image operations.""" - pass + def load_catalog_cache_start(self): + self._load_cat_cache_output(OutSpec(first=True)) - @pt_abstract - def li_recurse_progress(self, lin): - """Call to indicate that the named child made progress.""" - pass + def load_catalog_cache_done(self): + self._load_cat_cache_output(OutSpec(last=True)) - @pt_abstract - def reversion_start(self, goal_pkgs, goal_revs): pass + def refresh_start(self, pub_cnt, full_refresh, target_catalog=False): + # + # We can wind up doing multiple refreshes in some cases, + # for example when we have to check if pkg(7) is up-to-date, + # so we reset these each time we start. + # + self.pub_refresh.reset() + self.pub_refresh.goalitems = pub_cnt + self.pub_refresh_bytes.reset() + # Attribute defined outside __init__; pylint: disable=W0201 + self.refresh_full_refresh = full_refresh + self.refresh_target_catalog = target_catalog + if self.refresh_target_catalog: + assert self.refresh_full_refresh + + def refresh_start_pub(self, pub): + outspec = OutSpec() + # for now we only refresh one at a time, so we stash + # this here, and then assert for it in end_pub and + # in refresh_progress. + self.pub_refresh.curinfo = pub + if not self.pub_refresh.printed: + outspec.first = True + outspec.changed.append("startpublisher") + self.pub_refresh.printed = True + self._refresh_output_progress(outspec) + + def refresh_end_pub(self, pub): + assert pub == self.pub_refresh.curinfo + assert self.pub_refresh.printed + outspec = OutSpec() + outspec.changed.append("endpublisher") + self.pub_refresh.items += 1 + self._refresh_output_progress(outspec) + + def refresh_progress(self, pub, nbytes): + # when called back from the transport we lose the knowledge + # of what 'pub' is, at least for now. 
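
The division of labour is that ProgressTracker counts things in TrackerItem/GoalTrackerItem objects while the _*_output hooks render them. A deliberately incomplete sketch of that flow for the publisher-refresh path only; a real subclass must implement every ProgressTrackerBackend method, and the publisher names here are hypothetical:

    from pkg.client import progress

    class RefreshEchoTracker(progress.ProgressTracker):
        # Illustrative only: just the hooks hit by the refresh path.
        def _output_flush(self):
            pass

        def _refresh_output_progress(self, outspec):
            print("refresh", self.pub_refresh.pair(),
                  self.pub_refresh_bytes.items, "bytes")

    t = RefreshEchoTracker()
    t.refresh_start(2, full_refresh=False)
    for pub in ("publisher-a", "publisher-b"):
        t.refresh_start_pub(pub)
        t.refresh_progress(pub, 8192)
        t.refresh_end_pub(pub)
    t.refresh_done()
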
+ assert pub is None or pub == self.pub_refresh.curinfo + assert self.pub_refresh.printed + self.pub_refresh_bytes.items += nbytes + self._refresh_output_progress(OutSpec()) + + def refresh_done(self): + # If refreshes fail, we might not meet the goal. + self.pub_refresh.done(goalcheck=False) + self.pub_refresh_bytes.done() + self._refresh_output_progress(OutSpec(last=True)) + + def plan_all_start(self): + self.set_major_phase(self.PHASE_PLAN) + self.plan_generic.reset() + self.plan_generic.start() + + def plan_start(self, planid, goal=None): + planitem = self._planitems[planid] + planitem.reset() + if goal: + if not isinstance(planitem, GoalTrackerItem): + raise RuntimeError("can't set goal on non-goal tracker") + planitem.goalitems = goal + planitem.start() + + def plan_add_progress(self, planid, nitems=1): + planitem = self._planitems[planid] + outspec = OutSpec(first=not planitem.printed) + planitem.items += nitems + self._plan_output(outspec, planitem) + planitem.printed = True + + def plan_done(self, planid): + planitem = self._planitems[planid] + planitem.done() + if planitem.printed: + self._plan_output(OutSpec(last=True), planitem) + + def plan_all_done(self): + self.plan_generic.done() + self._plan_output_all_done() + + def manifest_fetch_start(self, goal_mfsts): + self.mfst_fetch.reset() + self.mfst_commit.reset() + self.mfst_fetch.goalitems = goal_mfsts + self.mfst_commit.goalitems = goal_mfsts + + def manifest_fetch_progress(self, completion): + assert self.major_phase in [self.PHASE_PLAN, self.PHASE_UTILITY] + outspec = OutSpec(first=not self.mfst_fetch.printed) + self.mfst_fetch.printed = True + if completion: + self.mfst_fetch.items += 1 + outspec.changed.append("manifests") + self._mfst_fetch(outspec) + + def manifest_commit(self): + assert self.major_phase in [self.PHASE_PLAN, self.PHASE_UTILITY] + outspec = OutSpec(first=not self.mfst_commit.printed) + self.mfst_commit.printed = True + self.mfst_commit.items += 1 + self._mfst_commit(outspec) + + def manifest_fetch_done(self): + # These can fail to reach their goals due to various transport + # errors, depot misconfigurations, etc. So disable goal check. 
+ self.mfst_fetch.done(goalcheck=False) + self.mfst_commit.done(goalcheck=False) + if self.mfst_fetch.printed: + self._mfst_fetch(OutSpec(last=True)) + + def repo_verify_start(self, npkgs): + self.repo_ver_pkgs.reset() + self.repo_ver_pkgs.goalitems = npkgs + + def repo_verify_start_pkg(self, pkgfmri, repository_scan=False): + if pkgfmri != self.repo_ver_pkgs.curinfo: + self.repo_ver_pkgs.items += 1 + self.repo_ver_pkgs.curinfo = pkgfmri + self._repo_ver_output( + OutSpec(changed=["startpkg"]), repository_scan=repository_scan + ) + + def repo_verify_add_progress(self, pkgfmri): + self._repo_ver_output(OutSpec()) + + def repo_verify_yield_error(self, pkgfmri, errors): + self._repo_ver_output_error(errors) + + def repo_verify_end_pkg(self, pkgfmri): + self._repo_ver_output(OutSpec(changed=["endpkg"])) + self.repo_ver_pkgs.curinfo = None + + def repo_verify_done(self): + self.repo_ver_pkgs.done() + + def repo_fix_start(self, nitems): + self.repo_fix.reset() + self.repo_fix.goalitems = nitems + + def repo_fix_add_progress(self, pkgfmri): + self._repo_fix_output(OutSpec()) + + def repo_fix_yield_error(self, pkgfmri, errors): + self._repo_fix_output_error(errors) + + def repo_fix_yield_info(self, pkgfmri, info): + self._repo_fix_output_info(info) + + def repo_fix_done(self): + self.repo_fix.done() + + def archive_set_goal(self, arcname, nitems, nbytes): + self._archive_name = arcname # pylint: disable=W0201 + self.archive_items.goalitems = nitems + self.archive_bytes.goalitems = nbytes + + def archive_add_progress(self, nitems, nbytes): + outspec = OutSpec() + if not self.archive_bytes.printed: + self.archive_bytes.printed = True + outspec.first = True + self.archive_items.items += nitems + self.archive_bytes.items += nbytes + self._archive_output(outspec) + + def archive_done(self): + """Call when all archiving is finished""" + self.archive_items.done() + self.archive_bytes.done() + # only print 'last' if we printed 'first' + if self.archive_bytes.printed: + self._archive_output(OutSpec(last=True)) + + def download_set_goal(self, npkgs, nfiles, nbytes): + # Attribute defined outside __init__; pylint: disable=W0201 + self.dl_mode = self.DL_MODE_DOWNLOAD + self.dl_pkgs.goalitems = npkgs + self.dl_files.goalitems = nfiles + self.dl_bytes.goalitems = nbytes + self.dl_estimator = SpeedEstimator(self.dl_bytes.goalitems) + + def download_start_pkg(self, pkgfmri): + self.set_major_phase(self.PHASE_DOWNLOAD) + self.dl_pkgs.curinfo = pkgfmri + outspec = OutSpec(changed=["startpkg"]) + if self.dl_bytes.goalitems != 0: + if not self.dl_bytes.printed: + # indicate that this is the first _dl_output + # call + self.dl_bytes.printed = True + self.dl_estimator.start() + outspec.first = True + self._dl_output(outspec) + + def download_end_pkg(self, pkgfmri): + self.dl_pkgs.items += 1 + if self.dl_bytes.goalitems != 0: + self._dl_output(OutSpec(changed=["endpkg"])) + + def download_add_progress(self, nfiles, nbytes, cachehit=False): + """Call to provide news that the download has made progress""" + # + # These guards are present because download_add_progress can + # be called when an *upload* aborts; we want to prevent updates + # to these items, since they in this case might have no goals. 
+ # + if self.dl_bytes.goalitems > 0: + self.dl_bytes.items += nbytes + if self.dl_files.goalitems > 0: + self.dl_files.items += nfiles - @pt_abstract - def reversion_add_progress(self, pfmri, pkgs=0, reversioned=0, - adjusted=0): - pass + if cachehit: + self.dl_estimator.goalbytes -= nbytes + else: + self.dl_estimator.newdata(nbytes) + + if self.dl_bytes.goalitems != 0: + outspec = OutSpec() + if nbytes > 0: + outspec.changed.append("dl_bytes") + if nfiles > 0: + outspec.changed.append("dl_files") + if self.dl_mode == self.DL_MODE_DOWNLOAD: + self._dl_output(outspec) + if self.dl_mode == self.DL_MODE_REPUBLISH: + self._republish_output(outspec) + + def download_done(self, dryrun=False): + """Call when all downloading is finished.""" + if dryrun: + # Dryrun mode is used by pkgrecv in order to + # simulate a download; we do what we have to + # in order to fake up a download result. + self.dl_pkgs.items = self.dl_pkgs.goalitems + self.dl_files.items = self.dl_files.goalitems + self.dl_bytes.items = self.dl_bytes.goalitems + self.dl_estimator.start(timestamp=0) + self.dl_estimator.newdata(self.dl_bytes.goalitems, timestamp=0) + self.dl_estimator.done(timestamp=0) + else: + self.dl_estimator.done() - @pt_abstract - def reversion_done(self): pass + self.dl_pkgs.done() + self.dl_files.done() + self.dl_bytes.done() + if self.dl_bytes.goalitems != 0: + self._dl_output(OutSpec(last=True)) -class ProgressTracker(ProgressTrackerFrontend, ProgressTrackerBackend): - """This class is used by the client to render and track progress - towards the completion of various tasks, such as download, - installation, update, etc. - - The superclass is largely concerned with tracking the raw numbers, and - with calling various callback routines when events of interest occur. - The callback routines are defined in the ProgressTrackerBackend class, - below. - - Different subclasses provide the actual rendering to the user, with - differing levels of detail and prettiness. - - Note that as currently envisioned, this class is concerned with - tracking the progress of long-running operations: it is NOT a general - purpose output mechanism nor an error collector. - - Most subclasses of ProgressTracker need not override the methods of - this class. 
However, most subclasses will need need to mix in and - define ALL of the methods from the ProgressTrackerBackend class.""" - - DL_MODE_DOWNLOAD = 1 - DL_MODE_REPUBLISH = 2 - - def __init__(self): - ProgressTrackerBackend.__init__(self) - ProgressTrackerFrontend.__init__(self) - self.reset() - - def reset_download(self): - # Attribute defined outside __init__; pylint: disable=W0201 - self.dl_mode = None - self.dl_estimator = None - - self.dl_pkgs = GoalTrackerItem(_("Download packages")) - self.dl_files = GoalTrackerItem(_("Download files")) - self.dl_bytes = GoalTrackerItem(_("Download bytes")) - self._dl_items = [self.dl_pkgs, self.dl_files, self.dl_bytes] - - # republishing support; republishing also uses dl_bytes - self.repub_pkgs = \ - GoalTrackerItem(_("Republished pkgs")) - self.repub_send_bytes = \ - GoalTrackerItem(_("Republish sent bytes")) - - def reset(self): - # Attribute defined outside __init__; pylint: disable=W0201 - self.major_phase = self.PHASE_PREPLAN - self.purpose = self.PURPOSE_NORMAL - - self.pub_refresh = GoalTrackerItem(_("Refresh Publishers")) - # We don't know the goal in advance for this one - self.pub_refresh_bytes = TrackerItem(_("Refresh bytes")) - self.refresh_target_catalog = None - self.refresh_full_refresh = False - - self.mfst_fetch = GoalTrackerItem(_("Download Manifests")) - self.mfst_commit = GoalTrackerItem(_("Committed Manifests")) - - self.repo_ver_pkgs = GoalTrackerItem( - _("Verify Repository Content")) - self.repo_fix = GoalTrackerItem(_("Fix Repository Content")) - - # archiving support - self.archive_items = GoalTrackerItem(_("Archived items")) - self.archive_bytes = GoalTrackerItem(_("Archived bytes")) - - # reversioning support - self.reversion_pkgs = GoalTrackerItem(_("Processed Packages")) - self.reversion_revs = GoalTrackerItem(_("Reversioned Packages")) - self.reversion_adjs = GoalTrackerItem(_("Adjusted Packages")) - - # Used to measure elapsed time of entire planning; not otherwise - # rendered to the user. 
- self.plan_generic = TrackerItem("") - - self._planitems = { - self.PLAN_SOLVE_SETUP: - TrackerItem(_("Solver setup")), - self.PLAN_SOLVE_SOLVER: - TrackerItem(_("Running solver")), - self.PLAN_FIND_MFST: - TrackerItem(_("Finding local manifests")), - self.PLAN_PKGPLAN: - GoalTrackerItem(_("Package planning")), - self.PLAN_ACTION_MERGE: - TrackerItem(_("Merging actions")), - self.PLAN_ACTION_CONFLICT: - TrackerItem(_("Checking for conflicting actions")), - self.PLAN_ACTION_CONSOLIDATE: - TrackerItem(_("Consolidating action changes")), - self.PLAN_ACTION_MEDIATION: - TrackerItem(_("Evaluating mediators")), - self.PLAN_ACTION_FINALIZE: - TrackerItem(_("Finalizing action plan")), - self.PLAN_MEDIATION_CHG: - TrackerItem(_("Evaluating mediator changes")), - self.PLAN_PKG_VERIFY: - GoalTrackerItem(_("Verifying Packages")), - self.PLAN_PKG_FIX: - GoalTrackerItem(_("Fixing Packages")), - } - - self._actionitems = { - self.ACTION_REMOVE: - GoalTrackerItem(_("Removing old actions")), - self.ACTION_INSTALL: - GoalTrackerItem(_("Installing new actions")), - self.ACTION_UPDATE: - GoalTrackerItem(_("Updating modified actions")), - } - - self._jobitems = { - self.JOB_STATE_DB: - TrackerItem(_("Updating package state database")), - self.JOB_IMAGE_STATE: - TrackerItem(_("Updating image state")), - self.JOB_FAST_LOOKUP: - TrackerItem(_("Creating fast lookup database")), - self.JOB_PKG_CACHE: - GoalTrackerItem(_("Updating package cache")), - self.JOB_READ_SEARCH: - TrackerItem(_("Reading search index")), - self.JOB_UPDATE_SEARCH: - GoalTrackerItem(_("Updating search index")), - self.JOB_REBUILD_SEARCH: - GoalTrackerItem(_("Building new search index")), - - # pkgrepo job items - self.JOB_REPO_DELSEARCH: - TrackerItem(_("Deleting search index")), - self.JOB_REPO_UPDATE_CAT: - TrackerItem(_("Updating catalog")), - self.JOB_REPO_ANALYZE_RM: - GoalTrackerItem(_("Analyzing removed packages")), - self.JOB_REPO_ANALYZE_REPO: - GoalTrackerItem(_("Analyzing repository packages")), - self.JOB_REPO_RM_MFST: - GoalTrackerItem(_("Removing package manifests")), - self.JOB_REPO_RM_FILES: - GoalTrackerItem(_("Removing package files")), - self.JOB_REPO_VERIFY_REPO: - GoalTrackerItem(_("Verifying repository content")), - self.JOB_REPO_FIX_REPO: - GoalTrackerItem(_("Fixing repository content")) - - } - - self.reset_download() - - self._archive_name = None - - # Lint's interaction with the progresstracker probably - # needs further work. - self.lint_phase = None - self.lint_phasetype = None - # This GoalTrackerItem is created on the fly. - self.lintitems = None - - # Linked images - self.linked_name = None - self.linked_running = [] - self.linked_pkg_op = None - self.linked_total = 0 - - def set_major_phase(self, majorphase): - # Attribute defined outside __init__; pylint: disable=W0201 - self.major_phase = majorphase - - def flush(self): - """Used to signal to the progresstracker that it should make - the output ready for use by another subsystem. In a - terminal-based environment, this would make sure that no - partially printed lines were present, and flush e.g. 
stdout.""" - self._output_flush() - - def set_purpose(self, purpose): - op = self.purpose - self.purpose = purpose # pylint: disable=W0201 - if op != self.purpose: - self._change_purpose(op, purpose) - - def get_purpose(self): - return self.purpose - - def cache_catalogs_start(self): - self._cache_cats_output(OutSpec(first=True)) - - def cache_catalogs_done(self): - self._cache_cats_output(OutSpec(last=True)) - - def load_catalog_cache_start(self): - self._load_cat_cache_output(OutSpec(first=True)) - - def load_catalog_cache_done(self): - self._load_cat_cache_output(OutSpec(last=True)) - - def refresh_start(self, pub_cnt, full_refresh, target_catalog=False): - # - # We can wind up doing multiple refreshes in some cases, - # for example when we have to check if pkg(7) is up-to-date, - # so we reset these each time we start. - # - self.pub_refresh.reset() - self.pub_refresh.goalitems = pub_cnt - self.pub_refresh_bytes.reset() - # Attribute defined outside __init__; pylint: disable=W0201 - self.refresh_full_refresh = full_refresh - self.refresh_target_catalog = target_catalog - if self.refresh_target_catalog: - assert self.refresh_full_refresh - - def refresh_start_pub(self, pub): - outspec = OutSpec() - # for now we only refresh one at a time, so we stash - # this here, and then assert for it in end_pub and - # in refresh_progress. - self.pub_refresh.curinfo = pub - if not self.pub_refresh.printed: - outspec.first = True - outspec.changed.append("startpublisher") - self.pub_refresh.printed = True - self._refresh_output_progress(outspec) - - def refresh_end_pub(self, pub): - assert pub == self.pub_refresh.curinfo - assert self.pub_refresh.printed - outspec = OutSpec() - outspec.changed.append("endpublisher") - self.pub_refresh.items += 1 - self._refresh_output_progress(outspec) - - def refresh_progress(self, pub, nbytes): - # when called back from the transport we lose the knowledge - # of what 'pub' is, at least for now. - assert pub is None or pub == self.pub_refresh.curinfo - assert self.pub_refresh.printed - self.pub_refresh_bytes.items += nbytes - self._refresh_output_progress(OutSpec()) - - def refresh_done(self): - # If refreshes fail, we might not meet the goal. 
- self.pub_refresh.done(goalcheck=False) - self.pub_refresh_bytes.done() - self._refresh_output_progress(OutSpec(last=True)) - - def plan_all_start(self): - self.set_major_phase(self.PHASE_PLAN) - self.plan_generic.reset() - self.plan_generic.start() - - def plan_start(self, planid, goal=None): - planitem = self._planitems[planid] - planitem.reset() - if goal: - if not isinstance(planitem, GoalTrackerItem): - raise RuntimeError( - "can't set goal on non-goal tracker") - planitem.goalitems = goal - planitem.start() - - def plan_add_progress(self, planid, nitems=1): - planitem = self._planitems[planid] - outspec = OutSpec(first=not planitem.printed) - planitem.items += nitems - self._plan_output(outspec, planitem) - planitem.printed = True - - def plan_done(self, planid): - planitem = self._planitems[planid] - planitem.done() - if planitem.printed: - self._plan_output(OutSpec(last=True), planitem) - - def plan_all_done(self): - self.plan_generic.done() - self._plan_output_all_done() - - def manifest_fetch_start(self, goal_mfsts): - self.mfst_fetch.reset() - self.mfst_commit.reset() - self.mfst_fetch.goalitems = goal_mfsts - self.mfst_commit.goalitems = goal_mfsts - - def manifest_fetch_progress(self, completion): - assert self.major_phase in [self.PHASE_PLAN, self.PHASE_UTILITY] - outspec = OutSpec(first=not self.mfst_fetch.printed) - self.mfst_fetch.printed = True - if completion: - self.mfst_fetch.items += 1 - outspec.changed.append("manifests") - self._mfst_fetch(outspec) - - def manifest_commit(self): - assert self.major_phase in [self.PHASE_PLAN, self.PHASE_UTILITY] - outspec = OutSpec(first=not self.mfst_commit.printed) - self.mfst_commit.printed = True - self.mfst_commit.items += 1 - self._mfst_commit(outspec) - - def manifest_fetch_done(self): - # These can fail to reach their goals due to various transport - # errors, depot misconfigurations, etc. So disable goal check. 
- self.mfst_fetch.done(goalcheck=False) - self.mfst_commit.done(goalcheck=False) - if self.mfst_fetch.printed: - self._mfst_fetch(OutSpec(last=True)) - - def repo_verify_start(self, npkgs): - self.repo_ver_pkgs.reset() - self.repo_ver_pkgs.goalitems = npkgs - - def repo_verify_start_pkg(self, pkgfmri, repository_scan=False): - if pkgfmri != self.repo_ver_pkgs.curinfo: - self.repo_ver_pkgs.items += 1 - self.repo_ver_pkgs.curinfo = pkgfmri - self._repo_ver_output(OutSpec(changed=["startpkg"]), - repository_scan=repository_scan) - - def repo_verify_add_progress(self, pkgfmri): - self._repo_ver_output(OutSpec()) - - def repo_verify_yield_error(self, pkgfmri, errors): - self._repo_ver_output_error(errors) - - def repo_verify_end_pkg(self, pkgfmri): - self._repo_ver_output(OutSpec(changed=["endpkg"])) - self.repo_ver_pkgs.curinfo = None - - def repo_verify_done(self): - self.repo_ver_pkgs.done() - - def repo_fix_start(self, nitems): - self.repo_fix.reset() - self.repo_fix.goalitems = nitems - - def repo_fix_add_progress(self, pkgfmri): - self._repo_fix_output(OutSpec()) - - def repo_fix_yield_error(self, pkgfmri, errors): - self._repo_fix_output_error(errors) - - def repo_fix_yield_info(self, pkgfmri, info): - self._repo_fix_output_info(info) - - def repo_fix_done(self): - self.repo_fix.done() - - def archive_set_goal(self, arcname, nitems, nbytes): - self._archive_name = arcname # pylint: disable=W0201 - self.archive_items.goalitems = nitems - self.archive_bytes.goalitems = nbytes - - def archive_add_progress(self, nitems, nbytes): - outspec = OutSpec() - if not self.archive_bytes.printed: - self.archive_bytes.printed = True - outspec.first = True - self.archive_items.items += nitems - self.archive_bytes.items += nbytes - self._archive_output(outspec) - - def archive_done(self): - """Call when all archiving is finished""" - self.archive_items.done() - self.archive_bytes.done() - # only print 'last' if we printed 'first' - if self.archive_bytes.printed: - self._archive_output(OutSpec(last=True)) - - def download_set_goal(self, npkgs, nfiles, nbytes): - # Attribute defined outside __init__; pylint: disable=W0201 - self.dl_mode = self.DL_MODE_DOWNLOAD - self.dl_pkgs.goalitems = npkgs - self.dl_files.goalitems = nfiles - self.dl_bytes.goalitems = nbytes - self.dl_estimator = SpeedEstimator(self.dl_bytes.goalitems) - - def download_start_pkg(self, pkgfmri): - self.set_major_phase(self.PHASE_DOWNLOAD) - self.dl_pkgs.curinfo = pkgfmri - outspec = OutSpec(changed=["startpkg"]) - if self.dl_bytes.goalitems != 0: - if not self.dl_bytes.printed: - # indicate that this is the first _dl_output - # call - self.dl_bytes.printed = True - self.dl_estimator.start() - outspec.first = True - self._dl_output(outspec) - - def download_end_pkg(self, pkgfmri): - self.dl_pkgs.items += 1 - if self.dl_bytes.goalitems != 0: - self._dl_output(OutSpec(changed=["endpkg"])) - - def download_add_progress(self, nfiles, nbytes, cachehit=False): - """Call to provide news that the download has made progress""" - # - # These guards are present because download_add_progress can - # be called when an *upload* aborts; we want to prevent updates - # to these items, since they in this case might have no goals. 
- # - if self.dl_bytes.goalitems > 0: - self.dl_bytes.items += nbytes - if self.dl_files.goalitems > 0: - self.dl_files.items += nfiles - - if cachehit: - self.dl_estimator.goalbytes -= nbytes - else: - self.dl_estimator.newdata(nbytes) - - if self.dl_bytes.goalitems != 0: - outspec = OutSpec() - if nbytes > 0: - outspec.changed.append("dl_bytes") - if nfiles > 0: - outspec.changed.append("dl_files") - if self.dl_mode == self.DL_MODE_DOWNLOAD: - self._dl_output(outspec) - if self.dl_mode == self.DL_MODE_REPUBLISH: - self._republish_output(outspec) - - def download_done(self, dryrun=False): - """Call when all downloading is finished.""" - if dryrun: - # Dryrun mode is used by pkgrecv in order to - # simulate a download; we do what we have to - # in order to fake up a download result. - self.dl_pkgs.items = self.dl_pkgs.goalitems - self.dl_files.items = self.dl_files.goalitems - self.dl_bytes.items = self.dl_bytes.goalitems - self.dl_estimator.start(timestamp=0) - self.dl_estimator.newdata(self.dl_bytes.goalitems, - timestamp=0) - self.dl_estimator.done(timestamp=0) - else: - self.dl_estimator.done() - - self.dl_pkgs.done() - self.dl_files.done() - self.dl_bytes.done() - - if self.dl_bytes.goalitems != 0: - self._dl_output(OutSpec(last=True)) - - def actions_set_goal(self, actionid, nactions): - """Called to set the goal for a particular phase of action - activity (i.e. ACTION_REMOVE, ACTION_INSTALL, or ACTION_UPDATE. - """ - assert self.major_phase == self.PHASE_EXECUTE - actionitem = self._actionitems[actionid] - actionitem.reset() - actionitem.goalitems = nactions - - def actions_add_progress(self, actionid): - assert self.major_phase == self.PHASE_EXECUTE - actionitem = self._actionitems[actionid] - actionitem.items += 1 - self._act_output(OutSpec(first=(actionitem.items == 1)), - actionitem) - - def actions_done(self, actionid): - """Called when done each phase of actions processing.""" - assert self.major_phase == self.PHASE_EXECUTE - actionitem = self._actionitems[actionid] - actionitem.done() - if actionitem.goalitems != 0: - self._act_output(OutSpec(last=True), actionitem) - - def actions_all_done(self): - total_actions = sum(x.items for x in self._actionitems.values()) - if total_actions != 0: - self._act_output_all_done() - - def job_start(self, jobid, goal=None): - jobitem = self._jobitems[jobid] - jobitem.reset() - outspec = OutSpec() - if goal: - if not isinstance(jobitem, GoalTrackerItem): - raise RuntimeError( - "can't set goal on non-goal tracker") - jobitem.goalitems = goal - jobitem.printed = True - self._job_output(outspec, jobitem) - - def job_add_progress(self, jobid, nitems=1): - jobitem = self._jobitems[jobid] - outspec = OutSpec(first=not jobitem.printed) - jobitem.printed = True - jobitem.items += nitems - self._job_output(outspec, jobitem) - - def job_done(self, jobid): - jobitem = self._jobitems[jobid] - # only print the 'done' if we printed the 'start' - jobitem.done() - if jobitem.printed: - self._job_output(OutSpec(last=True), jobitem) - - def republish_set_goal(self, npkgs, ngetbytes, nsendbytes): - self.dl_mode = self.DL_MODE_REPUBLISH # pylint: disable=W0201 - - self.repub_pkgs.goalitems = npkgs - self.repub_send_bytes.goalitems = nsendbytes - - self.dl_bytes.goalitems = ngetbytes - # We don't have a good value to set this to. 
- self.dl_files.goalitems = 1 << 64 - # Attribute defined outside __init__; pylint: disable=W0201 - self.dl_estimator = SpeedEstimator(self.dl_bytes.goalitems) - - def republish_start_pkg(self, pkgfmri, getbytes=None, sendbytes=None): - assert isinstance(pkgfmri, pkg.fmri.PkgFmri) - - if getbytes is not None: - # Allow reset of GET and SEND amounts on a per-package - # basis. This allows the user to monitor the overall - # progress of the operation in terms of total packages - # while not requiring the program to pre-process all - # packages to determine total byte sizes before starting - # the operation. - assert sendbytes is not None - self.dl_bytes.items = 0 - self.dl_bytes.goalitems = getbytes - self.dl_estimator.goalbytes = getbytes - - self.repub_send_bytes.items = 0 - self.repub_send_bytes.goalitems = sendbytes - - self.repub_pkgs.curinfo = pkgfmri - outspec = OutSpec(changed=["startpkg"]) - # - # We can't do our normal trick of checking to see if - # dl_bytes.items is zero because it might have been reset - # above. - # - if not self.repub_pkgs.printed: - # indicate that this is the first _republish_output call - outspec.first = True - self.repub_pkgs.printed = True - self.dl_estimator.start() - if self.repub_pkgs.goalitems != 0: - self._republish_output(outspec) - - def republish_end_pkg(self, pkgfmri): - self.repub_pkgs.items += 1 - self._republish_output(OutSpec(changed=["endpkg"])) - - def upload_add_progress(self, nbytes): - """Call to provide news that the upload has made progress""" - # - # upload_add_progress can be called when a *download* aborts; - # this guard prevents us from updating the item (which has - # no goal set, and will raise an exception). - # - if self.repub_send_bytes.goalitems and \ - self.repub_send_bytes.goalitems > 0: - self.repub_send_bytes.items += nbytes - self._republish_output(OutSpec()) - - def republish_done(self, dryrun=False): - """Call when all republishing is finished""" - if dryrun: - self.repub_pkgs.items = self.repub_pkgs.goalitems - self.repub_send_bytes.items = \ - self.repub_send_bytes.goalitems - self.dl_bytes.items = self.dl_bytes.goalitems - - self.repub_pkgs.done() - self.repub_send_bytes.done() - self.dl_bytes.done() - - if self.repub_pkgs.goalitems != 0: - outspec = OutSpec(last=True) - # Get the header printed if we've not printed - # anything else thus far (happens in dryrun mode). 
- outspec.first = not self.repub_pkgs.printed - self._republish_output(outspec) - self.repub_pkgs.printed = True - - def lint_next_phase(self, goalitems, lint_phasetype): - # Attribute defined outside __init__; pylint: disable=W0201 - self.lint_phasetype = lint_phasetype - if self.lint_phase is not None: - self._lint_output(OutSpec(last=True)) - if self.lint_phase is None: - self.lint_phase = 0 - self.lint_phase += 1 - if lint_phasetype == self.LINT_PHASETYPE_SETUP: - phasename = _("Lint setup {0:d}".format( - self.lint_phase)) - else: - phasename = _("Lint phase {0:d}".format( - self.lint_phase)) - self.lintitems = GoalTrackerItem(phasename) - self.lintitems.goalitems = goalitems - self._lint_output(OutSpec(first=True)) - - def lint_add_progress(self): - self.lintitems.items += 1 - self._lint_output(OutSpec()) - - def lint_done(self): - self.lint_phase = None # pylint: disable=W0201 - if self.lintitems: - self._lint_output(OutSpec(last=True)) - - def set_linked_name(self, lin): - """Called once an image determines its linked image name.""" - self.linked_name = lin # pylint: disable=W0201 - - def li_recurse_start(self, pkg_op, total): - """Called when we recurse into a child linked image.""" - # Attribute defined outside __init__; pylint: disable=W0201 - self.linked_pkg_op = pkg_op - self.linked_total = total - self._li_recurse_start_output() - - def li_recurse_end(self): - """Called when we return from a child linked image.""" - self._li_recurse_end_output() - - def li_recurse_status(self, lin_running, done): - """Call to update the progress tracker with the list of - images being operated on.""" - # Attribute defined outside __init__; pylint: disable=W0201 - self.linked_running = sorted(lin_running) - self._li_recurse_status_output(done) - - def li_recurse_output(self, lin, stdout, stderr): - """Call to display output from linked image operations.""" - self._li_recurse_output_output(lin, stdout, stderr) - - def li_recurse_progress(self, lin): - """Call to indicate that the named child made progress.""" - self._li_recurse_progress_output(lin) - - def reversion_start(self, goal_pkgs, goal_revs): - self.reversion_adjs.reset() - self.reversion_revs.reset() - self.reversion_pkgs.reset() - self.reversion_revs.goalitems = goal_revs - self.reversion_pkgs.goalitems = goal_pkgs - self.reversion_adjs.goalitems = -1 - - def reversion_add_progress(self, pfmri, pkgs=0, reversioned=0, - adjusted=0): - outspec = OutSpec() - if not self.reversion_pkgs.printed: - self.reversion_pkgs.printed = True - outspec.first = True - - self.reversion_revs.items += reversioned - self.reversion_adjs.items += adjusted - self.reversion_pkgs.items += pkgs - self._reversion(pfmri, outspec) - - def reversion_done(self): - self.reversion_pkgs.done() - self.reversion_revs.done() - self.reversion_adjs.done(goalcheck=False) - if self.reversion_pkgs.printed: - self._reversion("Done", OutSpec(last=True)) + def actions_set_goal(self, actionid, nactions): + """Called to set the goal for a particular phase of action + activity (i.e. ACTION_REMOVE, ACTION_INSTALL, or ACTION_UPDATE. 
+ """ + assert self.major_phase == self.PHASE_EXECUTE + actionitem = self._actionitems[actionid] + actionitem.reset() + actionitem.goalitems = nactions + + def actions_add_progress(self, actionid): + assert self.major_phase == self.PHASE_EXECUTE + actionitem = self._actionitems[actionid] + actionitem.items += 1 + self._act_output(OutSpec(first=(actionitem.items == 1)), actionitem) + + def actions_done(self, actionid): + """Called when done each phase of actions processing.""" + assert self.major_phase == self.PHASE_EXECUTE + actionitem = self._actionitems[actionid] + actionitem.done() + if actionitem.goalitems != 0: + self._act_output(OutSpec(last=True), actionitem) + + def actions_all_done(self): + total_actions = sum(x.items for x in self._actionitems.values()) + if total_actions != 0: + self._act_output_all_done() + + def job_start(self, jobid, goal=None): + jobitem = self._jobitems[jobid] + jobitem.reset() + outspec = OutSpec() + if goal: + if not isinstance(jobitem, GoalTrackerItem): + raise RuntimeError("can't set goal on non-goal tracker") + jobitem.goalitems = goal + jobitem.printed = True + self._job_output(outspec, jobitem) + + def job_add_progress(self, jobid, nitems=1): + jobitem = self._jobitems[jobid] + outspec = OutSpec(first=not jobitem.printed) + jobitem.printed = True + jobitem.items += nitems + self._job_output(outspec, jobitem) + + def job_done(self, jobid): + jobitem = self._jobitems[jobid] + # only print the 'done' if we printed the 'start' + jobitem.done() + if jobitem.printed: + self._job_output(OutSpec(last=True), jobitem) + + def republish_set_goal(self, npkgs, ngetbytes, nsendbytes): + self.dl_mode = self.DL_MODE_REPUBLISH # pylint: disable=W0201 + + self.repub_pkgs.goalitems = npkgs + self.repub_send_bytes.goalitems = nsendbytes + + self.dl_bytes.goalitems = ngetbytes + # We don't have a good value to set this to. + self.dl_files.goalitems = 1 << 64 + # Attribute defined outside __init__; pylint: disable=W0201 + self.dl_estimator = SpeedEstimator(self.dl_bytes.goalitems) + + def republish_start_pkg(self, pkgfmri, getbytes=None, sendbytes=None): + assert isinstance(pkgfmri, pkg.fmri.PkgFmri) + + if getbytes is not None: + # Allow reset of GET and SEND amounts on a per-package + # basis. This allows the user to monitor the overall + # progress of the operation in terms of total packages + # while not requiring the program to pre-process all + # packages to determine total byte sizes before starting + # the operation. + assert sendbytes is not None + self.dl_bytes.items = 0 + self.dl_bytes.goalitems = getbytes + self.dl_estimator.goalbytes = getbytes + + self.repub_send_bytes.items = 0 + self.repub_send_bytes.goalitems = sendbytes + + self.repub_pkgs.curinfo = pkgfmri + outspec = OutSpec(changed=["startpkg"]) + # + # We can't do our normal trick of checking to see if + # dl_bytes.items is zero because it might have been reset + # above. 
+ # + if not self.repub_pkgs.printed: + # indicate that this is the first _republish_output call + outspec.first = True + self.repub_pkgs.printed = True + self.dl_estimator.start() + if self.repub_pkgs.goalitems != 0: + self._republish_output(outspec) + + def republish_end_pkg(self, pkgfmri): + self.repub_pkgs.items += 1 + self._republish_output(OutSpec(changed=["endpkg"])) + + def upload_add_progress(self, nbytes): + """Call to provide news that the upload has made progress""" + # + # upload_add_progress can be called when a *download* aborts; + # this guard prevents us from updating the item (which has + # no goal set, and will raise an exception). + # + if ( + self.repub_send_bytes.goalitems + and self.repub_send_bytes.goalitems > 0 + ): + self.repub_send_bytes.items += nbytes + self._republish_output(OutSpec()) + + def republish_done(self, dryrun=False): + """Call when all republishing is finished""" + if dryrun: + self.repub_pkgs.items = self.repub_pkgs.goalitems + self.repub_send_bytes.items = self.repub_send_bytes.goalitems + self.dl_bytes.items = self.dl_bytes.goalitems + + self.repub_pkgs.done() + self.repub_send_bytes.done() + self.dl_bytes.done() + + if self.repub_pkgs.goalitems != 0: + outspec = OutSpec(last=True) + # Get the header printed if we've not printed + # anything else thus far (happens in dryrun mode). + outspec.first = not self.repub_pkgs.printed + self._republish_output(outspec) + self.repub_pkgs.printed = True + + def lint_next_phase(self, goalitems, lint_phasetype): + # Attribute defined outside __init__; pylint: disable=W0201 + self.lint_phasetype = lint_phasetype + if self.lint_phase is not None: + self._lint_output(OutSpec(last=True)) + if self.lint_phase is None: + self.lint_phase = 0 + self.lint_phase += 1 + if lint_phasetype == self.LINT_PHASETYPE_SETUP: + phasename = _("Lint setup {0:d}".format(self.lint_phase)) + else: + phasename = _("Lint phase {0:d}".format(self.lint_phase)) + self.lintitems = GoalTrackerItem(phasename) + self.lintitems.goalitems = goalitems + self._lint_output(OutSpec(first=True)) + + def lint_add_progress(self): + self.lintitems.items += 1 + self._lint_output(OutSpec()) + + def lint_done(self): + self.lint_phase = None # pylint: disable=W0201 + if self.lintitems: + self._lint_output(OutSpec(last=True)) + + def set_linked_name(self, lin): + """Called once an image determines its linked image name.""" + self.linked_name = lin # pylint: disable=W0201 + + def li_recurse_start(self, pkg_op, total): + """Called when we recurse into a child linked image.""" + # Attribute defined outside __init__; pylint: disable=W0201 + self.linked_pkg_op = pkg_op + self.linked_total = total + self._li_recurse_start_output() + + def li_recurse_end(self): + """Called when we return from a child linked image.""" + self._li_recurse_end_output() + + def li_recurse_status(self, lin_running, done): + """Call to update the progress tracker with the list of + images being operated on.""" + # Attribute defined outside __init__; pylint: disable=W0201 + self.linked_running = sorted(lin_running) + self._li_recurse_status_output(done) + + def li_recurse_output(self, lin, stdout, stderr): + """Call to display output from linked image operations.""" + self._li_recurse_output_output(lin, stdout, stderr) + + def li_recurse_progress(self, lin): + """Call to indicate that the named child made progress.""" + self._li_recurse_progress_output(lin) + + def reversion_start(self, goal_pkgs, goal_revs): + self.reversion_adjs.reset() + self.reversion_revs.reset() + 
self.reversion_pkgs.reset() + self.reversion_revs.goalitems = goal_revs + self.reversion_pkgs.goalitems = goal_pkgs + self.reversion_adjs.goalitems = -1 + + def reversion_add_progress(self, pfmri, pkgs=0, reversioned=0, adjusted=0): + outspec = OutSpec() + if not self.reversion_pkgs.printed: + self.reversion_pkgs.printed = True + outspec.first = True + + self.reversion_revs.items += reversioned + self.reversion_adjs.items += adjusted + self.reversion_pkgs.items += pkgs + self._reversion(pfmri, outspec) + + def reversion_done(self): + self.reversion_pkgs.done() + self.reversion_revs.done() + self.reversion_adjs.done(goalcheck=False) + if self.reversion_pkgs.printed: + self._reversion("Done", OutSpec(last=True)) class MultiProgressTracker(ProgressTrackerFrontend): - """This class is a proxy, dispatching incoming progress tracking calls - to one or more contained (in self._trackers) additional progress - trackers. So, you can use this class to route progress tracking calls - to multiple places at once (for example, to the screen and to a log - file). + """This class is a proxy, dispatching incoming progress tracking calls + to one or more contained (in self._trackers) additional progress + trackers. So, you can use this class to route progress tracking calls + to multiple places at once (for example, to the screen and to a log + file). - We hijack most of the methods of the front-end superclass, except for - the constructor. For each hijacked method, we substitute a closure of - the multido() routine bound with the appropriate arguments.""" + We hijack most of the methods of the front-end superclass, except for + the constructor. For each hijacked method, we substitute a closure of + the multido() routine bound with the appropriate arguments.""" - def __init__(self, ptlist): - ProgressTrackerFrontend.__init__(self) + def __init__(self, ptlist): + ProgressTrackerFrontend.__init__(self) - self._trackers = [t for t in ptlist] - if len(self._trackers) == 0: - raise ProgressTrackerException("No trackers specified") - - # - # Returns a multido closure, which will iterate and call the - # named method for each tracker registered with the class. - # - def make_multido(method_name): - # self and method_name are bound in this context. - def multido(*args, **kwargs): - for trk in self._trackers: - f = getattr(trk, method_name) - f(*args, **kwargs) - return multido - - # - # Look in the ProgressTrackerFrontend for a list of frontend - # methods to multiplex. - # - for methname, m in six.iteritems( - ProgressTrackerFrontend.__dict__): - if methname == "__init__": - continue - if not inspect.isfunction(m): - continue - # Override all methods which aren't the constructor. - # Yes, this is a big hammer. - setattr(self, methname, make_multido(methname)) - return - - -class QuietProgressTracker(ProgressTracker): - """This progress tracker outputs nothing, but is semantically - intended to be "quiet." See also NullProgressTracker below.""" + self._trackers = [t for t in ptlist] + if len(self._trackers) == 0: + raise ProgressTrackerException("No trackers specified") # - # At construction, we inspect the ProgressTrackerBackend abstract - # superclass, and implement all of its methods as empty stubs. + # Returns a multido closure, which will iterate and call the + # named method for each tracker registered with the class. # - def __init__(self): - ProgressTracker.__init__(self) - - # We modify the object such that all of the methods it needs to - # implement are set to this __donothing empty method. 
- - def __donothing(*args, **kwargs): - # Unused argument 'args', 'kwargs'; - # pylint: disable=W0613 - pass - - for methname in ProgressTrackerBackend.__dict__: - if methname == "__init__": - continue - boundmeth = getattr(self, methname) - if not inspect.ismethod(boundmeth): - continue - setattr(self, methname, __donothing) - - -class NullProgressTracker(QuietProgressTracker): - """This ProgressTracker is a subclass of QuietProgressTracker because - that's convenient for now. It is semantically intended to be a no-op - progress tracker, and is useful for short-running operations which - need not display progress of any kind. + def make_multido(method_name): + # self and method_name are bound in this context. + def multido(*args, **kwargs): + for trk in self._trackers: + f = getattr(trk, method_name) + f(*args, **kwargs) - This subclass should be used by external consumers wanting to create - their own ProgressTracker class as any new output methods added to the - ProgressTracker class will also be handled here, insulating them from - additions to the ProgressTracker class.""" - - -class FunctionProgressTracker(ProgressTracker): - """This ProgressTracker is principally used for debugging. - Essentially it uses method replacement in order to create a - "tracing" ProgressTracker that shows calls to front end methods - and calls from the frontend to the backend.""" + return multido # - # When an instance of this class is initialized, we use inspection to - # insert a new method for each method; for frontend methods "chain" - # the old one behind the new one. The new method dumps out the - # arguments. + # Look in the ProgressTrackerFrontend for a list of frontend + # methods to multiplex. # - def __init__(self, output_file=sys.stdout): - ProgressTracker.__init__(self) - self.output_file = output_file - - def __donothing(*args, **kwargs): - # Unused argument 'args', 'kwargs'; - # pylint: disable=W0613 - pass - - # We modify the instance such that all of the methods it needs - # to implement are set to this __printargs method. - def make_printargs(methname, chainedmeth): - def __printargs(*args, **kwargs): - s = "" - for x in args: - s += "{0}, ".format(str(x)) - for x in sorted(kwargs): - s += "{0}={1}, ".format(x, kwargs[x]) - s = s[:-2] - - # - # Invoke chained method implementation; it's - # counter-intuitive, but we do this before - # printing things out, because under the - # circumstances we create in - # test_progress_tracker(), the chained method - # could throw an exception, aborting an - # upstream MultiProgressTracker's multido(), - # and spoiling the test_multi() test case. - # - chainedmeth(*args, **kwargs) - print("{0}({1})".format(methname, s), - file=self.output_file) - - return __printargs - - for methname in ProgressTrackerFrontend.__dict__: - if methname == "__init__": - continue - # - # this gets us the bound method, which we say here - # is "chained"-- we'll call it next after our inserted - # method. 
- # - chainedmeth = getattr(self, methname, None) - if not inspect.ismethod(chainedmeth): - continue - setattr(self, methname, - make_printargs(methname, chainedmeth)) - - for methname in ProgressTrackerBackend.__dict__: - if methname == "__init__": - continue - chainedmeth = getattr(self, methname, None) - if not inspect.ismethod(chainedmeth): - continue - chainedmeth = __donothing - setattr(self, methname, - make_printargs(methname, chainedmeth)) + for methname, m in six.iteritems(ProgressTrackerFrontend.__dict__): + if methname == "__init__": + continue + if not inspect.isfunction(m): + continue + # Override all methods which aren't the constructor. + # Yes, this is a big hammer. + setattr(self, methname, make_multido(methname)) + return -class DotProgressTracker(ProgressTracker): - """This tracker writes a series of dots for every operation. - This is intended for use by linked images.""" +class QuietProgressTracker(ProgressTracker): + """This progress tracker outputs nothing, but is semantically + intended to be "quiet." See also NullProgressTracker below.""" - TERM_DELAY = 0.1 + # + # At construction, we inspect the ProgressTrackerBackend abstract + # superclass, and implement all of its methods as empty stubs. + # + def __init__(self): + ProgressTracker.__init__(self) - def __init__(self, output_file=sys.stdout, term_delay=TERM_DELAY): - ProgressTracker.__init__(self) + # We modify the object such that all of the methods it needs to + # implement are set to this __donothing empty method. - self._pe = printengine.POSIXPrintEngine(output_file, - ttymode=False) - self._ptimer = PrintTimer(term_delay) + def __donothing(*args, **kwargs): + # Unused argument 'args', 'kwargs'; + # pylint: disable=W0613 + pass - def make_dot(): - def dot(*args, **kwargs): - # Unused argument 'args', 'kwargs'; - # pylint: disable=W0613 - if self._ptimer.time_to_print(): - self._pe.cprint(".", end='') - return dot + for methname in ProgressTrackerBackend.__dict__: + if methname == "__init__": + continue + boundmeth = getattr(self, methname) + if not inspect.ismethod(boundmeth): + continue + setattr(self, methname, __donothing) - for methname in ProgressTrackerBackend.__dict__: - if methname == "__init__": - continue - boundmeth = getattr(self, methname, None) - if not inspect.ismethod(boundmeth): - continue - setattr(self, methname, make_dot()) +class NullProgressTracker(QuietProgressTracker): + """This ProgressTracker is a subclass of QuietProgressTracker because + that's convenient for now. It is semantically intended to be a no-op + progress tracker, and is useful for short-running operations which + need not display progress of any kind. -class CommandLineProgressTracker(ProgressTracker): - """This progress tracker is a generically useful tracker for command - line output. It needs no special terminal features and so is - appropriate for sending through a pipe. This code is intended to be - platform neutral.""" - - # Default to printing periodic output every 5 seconds. - TERM_DELAY = 5.0 - - def __init__(self, output_file=sys.stdout, print_engine=None, - term_delay=TERM_DELAY): - ProgressTracker.__init__(self) - if not print_engine: - self._pe = printengine.POSIXPrintEngine(output_file, - ttymode=False) - else: - self._pe = print_engine - self._ptimer = PrintTimer(term_delay) - - def _phase_prefix(self): - if self.major_phase == self.PHASE_UTILITY: - return "" - - # The following string was originally expressed as - # "%*s: ". 
% \ - # (self.phase_max_width, self.phase_names[self.major_phase] - # ) - # however xgettext incorrectly flags this as an improper use of - # non-parameterized messages, which gets detected as an error - # during our build. So instead, we express the string using - # an equivalent .format(..) function - s = _("{{phase:>{0:d}}}: ").format(self.phase_max_width) - return s.format(phase=self.phase_names[self.major_phase]) + This subclass should be used by external consumers wanting to create + their own ProgressTracker class as any new output methods added to the + ProgressTracker class will also be handled here, insulating them from + additions to the ProgressTracker class.""" - # - # Helper routines - # - def __generic_start(self, msg): - # In the case of listing/up-to-date check operations, we - # we don't want to output planning information, so skip. - if self.purpose != self.PURPOSE_NORMAL: - return - self._pe.cprint(self._phase_prefix() + msg, end='') - # indicate that we just printed. - self._ptimer.reset_now() - - def __generic_done(self, msg=None): - # See __generic_start above. - if self.purpose != self.PURPOSE_NORMAL: - return - if msg is None: - msg = " " + _("Done") - self._pe.cprint(msg, end='\n') - self._ptimer.reset() - - def __generic_done_item(self, item, msg=None): - # See __generic_start above. - if self.purpose != self.PURPOSE_NORMAL: - return - if msg is None: - if global_settings.client_output_verbose > 0: - msg = " " + _("Done ({elapsed:>.3f}s)") - else: - msg = " " + _("Done") - outmsg = msg.format(elapsed=item.elapsed()) - self._pe.cprint(outmsg, end='\n') - self._ptimer.reset() - # - # Overridden methods from ProgressTrackerBackend - # - def _output_flush(self): - self._pe.flush() - - def _change_purpose(self, op, np): - self._ptimer.reset() - if np == self.PURPOSE_PKG_UPDATE_CHK: - self._pe.cprint(self._phase_prefix() + - _("Checking that pkg(7) is up to date ..."), end='') - if op == self.PURPOSE_PKG_UPDATE_CHK: - self._pe.cprint(" " + _("Done")) - - def _cache_cats_output(self, outspec): - if outspec.first: - self.__generic_start(_("Caching catalogs ...")) - if outspec.last: - self.__generic_done() - - def _load_cat_cache_output(self, outspec): - if outspec.first: - self.__generic_start(_("Loading catalog cache ...")) - if outspec.last: - self.__generic_done() - - def _refresh_output_progress(self, outspec): - # See __generic_start above. 
- if self.purpose != self.PURPOSE_NORMAL: - return - if "startpublisher" in outspec.changed: - p = self.pub_refresh.curinfo.prefix - if self.refresh_target_catalog: - m = _("Retrieving target catalog '{0}' " - "...").format(p) - elif self.refresh_full_refresh: - m = _("Retrieving catalog '{0}' ...").format(p) - else: - m = _("Refreshing catalog '{0}' ...").format(p) - self.__generic_start(m) - elif "endpublisher" in outspec.changed: - self.__generic_done() - - def _plan_output(self, outspec, planitem): - if outspec.first: - self.__generic_start(_("{0} ...").format(planitem.name)) - if outspec.last: - self.__generic_done_item(planitem) - - def _plan_output_all_done(self): - self.__generic_done(self._phase_prefix() + \ - _("Planning completed in {0:>.2f} seconds").format( - self.plan_generic.elapsed())) - - def _mfst_fetch(self, outspec): - if not self._ptimer.time_to_print() and \ - not outspec.first and not outspec.last: - return - if self.purpose != self.PURPOSE_NORMAL: - return - - # Reset timer; this prevents double printing for - # outspec.first and then again for the timer expiration - if outspec.first: - self._ptimer.reset_now() +class FunctionProgressTracker(ProgressTracker): + """This ProgressTracker is principally used for debugging. + Essentially it uses method replacement in order to create a + "tracing" ProgressTracker that shows calls to front end methods + and calls from the frontend to the backend.""" + + # + # When an instance of this class is initialized, we use inspection to + # insert a new method for each method; for frontend methods "chain" + # the old one behind the new one. The new method dumps out the + # arguments. + # + def __init__(self, output_file=sys.stdout): + ProgressTracker.__init__(self) + self.output_file = output_file + + def __donothing(*args, **kwargs): + # Unused argument 'args', 'kwargs'; + # pylint: disable=W0613 + pass + + # We modify the instance such that all of the methods it needs + # to implement are set to this __printargs method. + def make_printargs(methname, chainedmeth): + def __printargs(*args, **kwargs): + s = "" + for x in args: + s += "{0}, ".format(str(x)) + for x in sorted(kwargs): + s += "{0}={1}, ".format(x, kwargs[x]) + s = s[:-2] # - # There are a couple of reasons we might fetch manifests-- - # pkgrecv, pkglint, etc. can all do this. _phase_prefix() - # adjusts the output based on the major phase. + # Invoke chained method implementation; it's + # counter-intuitive, but we do this before + # printing things out, because under the + # circumstances we create in + # test_progress_tracker(), the chained method + # could throw an exception, aborting an + # upstream MultiProgressTracker's multido(), + # and spoiling the test_multi() test case. # - self._pe.cprint(self._phase_prefix() + - _("Fetching manifests: {num} {pctcomplete}% " - "complete").format( - num=self.mfst_fetch.pair(), - pctcomplete=int(self.mfst_fetch.pctdone()))) - - def _mfst_commit(self, outspec): - # For now, manifest commit is hard to handle in this - # line-oriented prog tracker, as we alternate back and forth - # between fetching and committing, and we don't want to - # spam the user with this too much. - pass + chainedmeth(*args, **kwargs) + print("{0}({1})".format(methname, s), file=self.output_file) + + return __printargs + + for methname in ProgressTrackerFrontend.__dict__: + if methname == "__init__": + continue + # + # this gets us the bound method, which we say here + # is "chained"-- we'll call it next after our inserted + # method. 
+ # + chainedmeth = getattr(self, methname, None) + if not inspect.ismethod(chainedmeth): + continue + setattr(self, methname, make_printargs(methname, chainedmeth)) + + for methname in ProgressTrackerBackend.__dict__: + if methname == "__init__": + continue + chainedmeth = getattr(self, methname, None) + if not inspect.ismethod(chainedmeth): + continue + chainedmeth = __donothing + setattr(self, methname, make_printargs(methname, chainedmeth)) - def _repo_ver_output(self, outspec, repository_scan=False): - pass - def _repo_ver_output_error(self, errors): - self._pe.cprint(errors) +class DotProgressTracker(ProgressTracker): + """This tracker writes a series of dots for every operation. + This is intended for use by linked images.""" - def _repo_ver_output_warning(self, warnings): - pass + TERM_DELAY = 0.1 - def _repo_ver_output_info(self, info): - pass + def __init__(self, output_file=sys.stdout, term_delay=TERM_DELAY): + ProgressTracker.__init__(self) - def _repo_ver_output_done(self): - pass + self._pe = printengine.POSIXPrintEngine(output_file, ttymode=False) + self._ptimer = PrintTimer(term_delay) - def _repo_fix_output(self, outspec): - pass + def make_dot(): + def dot(*args, **kwargs): + # Unused argument 'args', 'kwargs'; + # pylint: disable=W0613 + if self._ptimer.time_to_print(): + self._pe.cprint(".", end="") - def _repo_fix_output_error(self, errors): - self._pe.cprint(errors) + return dot - def _repo_fix_output_info(self, info): - self._pe.cprint(info) + for methname in ProgressTrackerBackend.__dict__: + if methname == "__init__": + continue + boundmeth = getattr(self, methname, None) + if not inspect.ismethod(boundmeth): + continue + setattr(self, methname, make_dot()) - def _repo_fix_output_done(self): - pass - def _dl_output(self, outspec): - if not self._ptimer.time_to_print() and not outspec.first and \ - not outspec.last: - return - - # Reset timer; this prevents double printing for - # outspec.first and then again for the timer expiration - if outspec.first: - self._ptimer.reset_now() - - if not outspec.last: - speed = self.dl_estimator.get_speed_estimate() - else: - speed = self.dl_estimator.get_final_speed() - speedstr = "" if speed is None else \ - "({0})".format(self.dl_estimator.format_speed(speed)) - - if not outspec.last: - # 'first' or time to print - mbs = format_pair("{0:.1f}", self.dl_bytes.items, - self.dl_bytes.goalitems, scale=(1024 * 1024)) - self._pe.cprint( - _("Download: {num} items {mbs}MB " - "{pctcomplete}% complete {speed}").format( - num=self.dl_files.pair(), mbs=mbs, - pctcomplete=int(self.dl_bytes.pctdone()), - speed=speedstr)) - else: - # 'last' - goal = misc.bytes_to_str(self.dl_bytes.goalitems) - self.__generic_done( - msg=_("Download: Completed {num} in {sec:>.2f} " - "seconds {speed}").format( - num=goal, sec=self.dl_estimator.elapsed(), - speed=speedstr)) - - def _republish_output(self, outspec): - if "startpkg" in outspec.changed: - pkgfmri = self.repub_pkgs.curinfo - self.__generic_start(_("Republish: {0} ... ").format( - pkgfmri.get_fmri(anarchy=True))) - if "endpkg" in outspec.changed: - self.__generic_done() - - def _archive_output(self, outspec): - if not self._ptimer.time_to_print() and not outspec: - return - if outspec.first: - # tell ptimer that we just printed. 
- self._ptimer.reset_now() - - if outspec.last: - goal = misc.bytes_to_str(self.archive_bytes.goalitems) - self.__generic_done( - msg=_("Archiving: Completed {num} in {secs:>.2f} " - "seconds").format( - num=goal, secs=self.archive_items.elapsed())) - return - - mbs = format_pair("{0:.1f}", self.archive_bytes.items, - self.archive_bytes.goalitems, scale=(1024 * 1024)) - self._pe.cprint( - _("Archiving: {pair} items {mbs}MB {pctcomplete}% " - "complete").format( - pair=self.archive_items.pair(), mbs=mbs, - pctcomplete=int(self.archive_bytes.pctdone()))) +class CommandLineProgressTracker(ProgressTracker): + """This progress tracker is a generically useful tracker for command + line output. It needs no special terminal features and so is + appropriate for sending through a pipe. This code is intended to be + platform neutral.""" + + # Default to printing periodic output every 5 seconds. + TERM_DELAY = 5.0 + + def __init__( + self, output_file=sys.stdout, print_engine=None, term_delay=TERM_DELAY + ): + ProgressTracker.__init__(self) + if not print_engine: + self._pe = printengine.POSIXPrintEngine(output_file, ttymode=False) + else: + self._pe = print_engine + self._ptimer = PrintTimer(term_delay) + + def _phase_prefix(self): + if self.major_phase == self.PHASE_UTILITY: + return "" + + # The following string was originally expressed as + # "%*s: ". % \ + # (self.phase_max_width, self.phase_names[self.major_phase] + # ) + # however xgettext incorrectly flags this as an improper use of + # non-parameterized messages, which gets detected as an error + # during our build. So instead, we express the string using + # an equivalent .format(..) function + s = _("{{phase:>{0:d}}}: ").format(self.phase_max_width) + return s.format(phase=self.phase_names[self.major_phase]) + + # + # Helper routines + # + def __generic_start(self, msg): + # In the case of listing/up-to-date check operations, we + # we don't want to output planning information, so skip. + if self.purpose != self.PURPOSE_NORMAL: + return + self._pe.cprint(self._phase_prefix() + msg, end="") + # indicate that we just printed. + self._ptimer.reset_now() + + def __generic_done(self, msg=None): + # See __generic_start above. + if self.purpose != self.PURPOSE_NORMAL: + return + if msg is None: + msg = " " + _("Done") + self._pe.cprint(msg, end="\n") + self._ptimer.reset() + + def __generic_done_item(self, item, msg=None): + # See __generic_start above. + if self.purpose != self.PURPOSE_NORMAL: + return + if msg is None: + if global_settings.client_output_verbose > 0: + msg = " " + _("Done ({elapsed:>.3f}s)") + else: + msg = " " + _("Done") + outmsg = msg.format(elapsed=item.elapsed()) + self._pe.cprint(outmsg, end="\n") + self._ptimer.reset() + + # + # Overridden methods from ProgressTrackerBackend + # + def _output_flush(self): + self._pe.flush() + + def _change_purpose(self, op, np): + self._ptimer.reset() + if np == self.PURPOSE_PKG_UPDATE_CHK: + self._pe.cprint( + self._phase_prefix() + + _("Checking that pkg(7) is up to date ..."), + end="", + ) + if op == self.PURPOSE_PKG_UPDATE_CHK: + self._pe.cprint(" " + _("Done")) + + def _cache_cats_output(self, outspec): + if outspec.first: + self.__generic_start(_("Caching catalogs ...")) + if outspec.last: + self.__generic_done() + + def _load_cat_cache_output(self, outspec): + if outspec.first: + self.__generic_start(_("Loading catalog cache ...")) + if outspec.last: + self.__generic_done() + + def _refresh_output_progress(self, outspec): + # See __generic_start above. 
+ if self.purpose != self.PURPOSE_NORMAL: + return + if "startpublisher" in outspec.changed: + p = self.pub_refresh.curinfo.prefix + if self.refresh_target_catalog: + m = _("Retrieving target catalog '{0}' " "...").format(p) + elif self.refresh_full_refresh: + m = _("Retrieving catalog '{0}' ...").format(p) + else: + m = _("Refreshing catalog '{0}' ...").format(p) + self.__generic_start(m) + elif "endpublisher" in outspec.changed: + self.__generic_done() + + def _plan_output(self, outspec, planitem): + if outspec.first: + self.__generic_start(_("{0} ...").format(planitem.name)) + if outspec.last: + self.__generic_done_item(planitem) + + def _plan_output_all_done(self): + self.__generic_done( + self._phase_prefix() + + _("Planning completed in {0:>.2f} seconds").format( + self.plan_generic.elapsed() + ) + ) + + def _mfst_fetch(self, outspec): + if ( + not self._ptimer.time_to_print() + and not outspec.first + and not outspec.last + ): + return + if self.purpose != self.PURPOSE_NORMAL: + return + + # Reset timer; this prevents double printing for + # outspec.first and then again for the timer expiration + if outspec.first: + self._ptimer.reset_now() # - # The progress tracking infrastructure wants to tell us about each - # kind of action activity (install, remove, update). For this - # progress tracker, we don't really care to expose that to the user, - # so we work in terms of total actions instead. + # There are a couple of reasons we might fetch manifests-- + # pkgrecv, pkglint, etc. can all do this. _phase_prefix() + # adjusts the output based on the major phase. # - def _act_output(self, outspec, actionitem): - if not self._ptimer.time_to_print() and not outspec.first: - return - # reset timer, since we're definitely printing now... - self._ptimer.reset_now() - total_actions = \ - sum(x.items for x in self._actionitems.values()) - total_goal = \ - sum(x.goalitems for x in self._actionitems.values()) - self._pe.cprint(self._phase_prefix() + - _("{num} actions ({type})").format( - num=format_pair("{0:d}", total_actions, total_goal), - type=actionitem.name)) - - def _act_output_all_done(self): - total_goal = \ - sum(x.goalitems for x in self._actionitems.values()) - total_time = \ - sum(x.elapsed() for x in self._actionitems.values()) - if total_goal == 0: - return - self._pe.cprint(self._phase_prefix() + - _("Completed {numactions:d} actions in {time:>.2f} " - "seconds.").format( - numactions=total_goal, time=total_time)) - - def _job_output(self, outspec, jobitem): - if outspec.first: - self.__generic_start("{0} ... ".format(jobitem.name)) - if outspec.last: - self.__generic_done_item(jobitem) - - def _lint_output(self, outspec): - if outspec.first: - if self.lint_phasetype == self.LINT_PHASETYPE_SETUP: - self._pe.cprint("{0} ... 
".format( - self.lintitems.name), end='') - elif self.lint_phasetype == self.LINT_PHASETYPE_EXECUTE: - self._pe.cprint("# --- {0} ---".format( - self.lintitems.name)) - if outspec.last: - if self.lint_phasetype == self.LINT_PHASETYPE_SETUP: - self.__generic_done() - elif self.lint_phasetype == self.LINT_PHASETYPE_EXECUTE: - pass - - def _li_recurse_start_output(self): - if self.linked_pkg_op == pkgdefs.PKG_OP_PUBCHECK: - self.__generic_start( - _("Linked image publisher check ...")) - return - elif self.linked_pkg_op == pkgdefs.PKG_OP_HOTFIX_CLEANUP: - self.__generic_start( - _("Cleaning up hot-fix origins ...")) - return - - def _li_recurse_end_output(self): - if self.linked_pkg_op in [pkgdefs.PKG_OP_PUBCHECK, - pkgdefs.PKG_OP_HOTFIX_CLEANUP]: - self.__generic_done() - return - self._pe.cprint(self._phase_prefix() + - _("Finished processing linked images.")) - - def __li_dump_output(self, output): - if not output: - return - lines = output.splitlines() - nlines = len(lines) - for linenum, line in enumerate(lines): - line = misc.force_str(line) - if linenum < nlines - 1: - self._pe.cprint("| " + line) - else: - if lines[linenum].strip() != "": - self._pe.cprint("| " + line) - self._pe.cprint("`") - - def _li_recurse_output_output(self, lin, stdout, stderr): - if not stdout and not stderr: - return - self._pe.cprint(self._phase_prefix() + - _("Linked image '{0}' output:").format(lin)) - self.__li_dump_output(stdout) - self.__li_dump_output(stderr) - - def _li_recurse_status_output(self, done): - if self.linked_pkg_op in [pkgdefs.PKG_OP_PUBCHECK, - pkgdefs.PKG_OP_HOTFIX_CLEANUP]: - return - - running = " ".join([str(i) for i in self.linked_running]) - msg = _("Linked images: {pair} done; {numworking:d} working: " - "{running}").format( - pair=format_pair("{0:d}", done, self.linked_total), - numworking=len(self.linked_running), - running=running) - self._pe.cprint(self._phase_prefix() + msg) - - def _li_recurse_progress_output(self, lin): - if self.linked_pkg_op in [pkgdefs.PKG_OP_PUBCHECK, - pkgdefs.PKG_OP_HOTFIX_CLEANUP]: - return - - def _reversion(self, pfmri, outspec): - if not self._ptimer.time_to_print() and not outspec: - return - - if outspec.first: - # tell ptimer that we just printed. - self._ptimer.reset_now() - - if outspec.last: - self.__generic_done( - msg=_("Reversioned {revs} of {pkgs} packages " - "and adjusted {adjs} packages.").format( - revs=self.reversion_revs.items, - pkgs=self.reversion_pkgs.items, - adjs=self.reversion_adjs.items)) - return - - self._pe.cprint( - _("Reversioning: {pkgs} processed, {revs} reversioned, " - "{adjs} adjusted").format( - pkgs=self.reversion_pkgs.pair(), - revs=self.reversion_revs.pair(), - adjs=self.reversion_adjs.items)) - - -class RADProgressTracker(CommandLineProgressTracker): - """This progress tracker is a subclass of CommandLineProgressTracker - which is specific for RAD progress event. - """ + self._pe.cprint( + self._phase_prefix() + + _("Fetching manifests: {num} {pctcomplete}% " "complete").format( + num=self.mfst_fetch.pair(), + pctcomplete=int(self.mfst_fetch.pctdone()), + ) + ) + + def _mfst_commit(self, outspec): + # For now, manifest commit is hard to handle in this + # line-oriented prog tracker, as we alternate back and forth + # between fetching and committing, and we don't want to + # spam the user with this too much. 
+ pass + + def _repo_ver_output(self, outspec, repository_scan=False): + pass + + def _repo_ver_output_error(self, errors): + self._pe.cprint(errors) + + def _repo_ver_output_warning(self, warnings): + pass + + def _repo_ver_output_info(self, info): + pass + + def _repo_ver_output_done(self): + pass + + def _repo_fix_output(self, outspec): + pass + + def _repo_fix_output_error(self, errors): + self._pe.cprint(errors) + + def _repo_fix_output_info(self, info): + self._pe.cprint(info) + + def _repo_fix_output_done(self): + pass + + def _dl_output(self, outspec): + if ( + not self._ptimer.time_to_print() + and not outspec.first + and not outspec.last + ): + return + + # Reset timer; this prevents double printing for + # outspec.first and then again for the timer expiration + if outspec.first: + self._ptimer.reset_now() + + if not outspec.last: + speed = self.dl_estimator.get_speed_estimate() + else: + speed = self.dl_estimator.get_final_speed() + speedstr = ( + "" + if speed is None + else "({0})".format(self.dl_estimator.format_speed(speed)) + ) + + if not outspec.last: + # 'first' or time to print + mbs = format_pair( + "{0:.1f}", + self.dl_bytes.items, + self.dl_bytes.goalitems, + scale=(1024 * 1024), + ) + self._pe.cprint( + _( + "Download: {num} items {mbs}MB " + "{pctcomplete}% complete {speed}" + ).format( + num=self.dl_files.pair(), + mbs=mbs, + pctcomplete=int(self.dl_bytes.pctdone()), + speed=speedstr, + ) + ) + else: + # 'last' + goal = misc.bytes_to_str(self.dl_bytes.goalitems) + self.__generic_done( + msg=_( + "Download: Completed {num} in {sec:>.2f} " "seconds {speed}" + ).format( + num=goal, sec=self.dl_estimator.elapsed(), speed=speedstr + ) + ) + + def _republish_output(self, outspec): + if "startpkg" in outspec.changed: + pkgfmri = self.repub_pkgs.curinfo + self.__generic_start( + _("Republish: {0} ... ").format(pkgfmri.get_fmri(anarchy=True)) + ) + if "endpkg" in outspec.changed: + self.__generic_done() + + def _archive_output(self, outspec): + if not self._ptimer.time_to_print() and not outspec: + return + if outspec.first: + # tell ptimer that we just printed. + self._ptimer.reset_now() + + if outspec.last: + goal = misc.bytes_to_str(self.archive_bytes.goalitems) + self.__generic_done( + msg=_( + "Archiving: Completed {num} in {secs:>.2f} " "seconds" + ).format(num=goal, secs=self.archive_items.elapsed()) + ) + return + + mbs = format_pair( + "{0:.1f}", + self.archive_bytes.items, + self.archive_bytes.goalitems, + scale=(1024 * 1024), + ) + self._pe.cprint( + _( + "Archiving: {pair} items {mbs}MB {pctcomplete}% " "complete" + ).format( + pair=self.archive_items.pair(), + mbs=mbs, + pctcomplete=int(self.archive_bytes.pctdone()), + ) + ) + + # + # The progress tracking infrastructure wants to tell us about each + # kind of action activity (install, remove, update). For this + # progress tracker, we don't really care to expose that to the user, + # so we work in terms of total actions instead. + # + def _act_output(self, outspec, actionitem): + if not self._ptimer.time_to_print() and not outspec.first: + return + # reset timer, since we're definitely printing now... 
+ self._ptimer.reset_now() + total_actions = sum(x.items for x in self._actionitems.values()) + total_goal = sum(x.goalitems for x in self._actionitems.values()) + self._pe.cprint( + self._phase_prefix() + + _("{num} actions ({type})").format( + num=format_pair("{0:d}", total_actions, total_goal), + type=actionitem.name, + ) + ) + + def _act_output_all_done(self): + total_goal = sum(x.goalitems for x in self._actionitems.values()) + total_time = sum(x.elapsed() for x in self._actionitems.values()) + if total_goal == 0: + return + self._pe.cprint( + self._phase_prefix() + + _( + "Completed {numactions:d} actions in {time:>.2f} " "seconds." + ).format(numactions=total_goal, time=total_time) + ) + + def _job_output(self, outspec, jobitem): + if outspec.first: + self.__generic_start("{0} ... ".format(jobitem.name)) + if outspec.last: + self.__generic_done_item(jobitem) + + def _lint_output(self, outspec): + if outspec.first: + if self.lint_phasetype == self.LINT_PHASETYPE_SETUP: + self._pe.cprint("{0} ... ".format(self.lintitems.name), end="") + elif self.lint_phasetype == self.LINT_PHASETYPE_EXECUTE: + self._pe.cprint("# --- {0} ---".format(self.lintitems.name)) + if outspec.last: + if self.lint_phasetype == self.LINT_PHASETYPE_SETUP: + self.__generic_done() + elif self.lint_phasetype == self.LINT_PHASETYPE_EXECUTE: + pass - # Default to printing periodic output every 5 seconds. - TERM_DELAY = 5.0 - - # Output constants. - O_PHASE = "phase" - O_MESSAGE = "message" - O_TIME = "time_taken" - O_TIME_U = "time_unit" - O_TYPE = "type" - O_PRO_ITEMS = "processed_items" - O_GOAL_ITEMS = "goal_items" - O_PCT_DONE = "percent_done" - O_ITEM_U = "item_unit" - O_SPEED = "speed" - O_RUNNING = "running" - O_GOAL_PRO_ITEMS = "goal_processed_items" - O_REV_ITEMS = "reversioned_items" - O_GOAL_REV_ITEMS = "goal_reversion_items" - O_ADJ_ITEMS = "adjusted_items" - O_LI_OUTPUT = "li_output" - O_LI_ERROR = "li_errors" - - def __init__(self, term_delay=TERM_DELAY, prog_event_handler=None): - CommandLineProgressTracker.__init__(self, - term_delay=term_delay) - self.__prog_event_handler = prog_event_handler - - def _phase_prefix(self): - if self.major_phase == self.PHASE_UTILITY: - return "Utility" - - return self.phase_names[self.major_phase] + def _li_recurse_start_output(self): + if self.linked_pkg_op == pkgdefs.PKG_OP_PUBCHECK: + self.__generic_start(_("Linked image publisher check ...")) + return + elif self.linked_pkg_op == pkgdefs.PKG_OP_HOTFIX_CLEANUP: + self.__generic_start(_("Cleaning up hot-fix origins ...")) + return + + def _li_recurse_end_output(self): + if self.linked_pkg_op in [ + pkgdefs.PKG_OP_PUBCHECK, + pkgdefs.PKG_OP_HOTFIX_CLEANUP, + ]: + self.__generic_done() + return + self._pe.cprint( + self._phase_prefix() + _("Finished processing linked images.") + ) + + def __li_dump_output(self, output): + if not output: + return + lines = output.splitlines() + nlines = len(lines) + for linenum, line in enumerate(lines): + line = misc.force_str(line) + if linenum < nlines - 1: + self._pe.cprint("| " + line) + else: + if lines[linenum].strip() != "": + self._pe.cprint("| " + line) + self._pe.cprint("`") + + def _li_recurse_output_output(self, lin, stdout, stderr): + if not stdout and not stderr: + return + self._pe.cprint( + self._phase_prefix() + _("Linked image '{0}' output:").format(lin) + ) + self.__li_dump_output(stdout) + self.__li_dump_output(stderr) + + def _li_recurse_status_output(self, done): + if self.linked_pkg_op in [ + pkgdefs.PKG_OP_PUBCHECK, + pkgdefs.PKG_OP_HOTFIX_CLEANUP, + ]: + return 
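# Editor's illustration (standalone sketch; the child output text is made
# up) of the "| " framing applied by __li_dump_output() above to a linked
# child's stdout/stderr: each line gains a "| " prefix, a trailing empty
# line is dropped, and a lone backquote closes the block.
output = "updating image\n\n"
lines = output.splitlines()
for n, line in enumerate(lines):
    if n < len(lines) - 1 or line.strip():
        print("| " + line)
print("`")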
+ + running = " ".join([str(i) for i in self.linked_running]) + msg = _( + "Linked images: {pair} done; {numworking:d} working: " "{running}" + ).format( + pair=format_pair("{0:d}", done, self.linked_total), + numworking=len(self.linked_running), + running=running, + ) + self._pe.cprint(self._phase_prefix() + msg) + + def _li_recurse_progress_output(self, lin): + if self.linked_pkg_op in [ + pkgdefs.PKG_OP_PUBCHECK, + pkgdefs.PKG_OP_HOTFIX_CLEANUP, + ]: + return + + def _reversion(self, pfmri, outspec): + if not self._ptimer.time_to_print() and not outspec: + return + + if outspec.first: + # tell ptimer that we just printed. + self._ptimer.reset_now() + + if outspec.last: + self.__generic_done( + msg=_( + "Reversioned {revs} of {pkgs} packages " + "and adjusted {adjs} packages." + ).format( + revs=self.reversion_revs.items, + pkgs=self.reversion_pkgs.items, + adjs=self.reversion_adjs.items, + ) + ) + return + + self._pe.cprint( + _( + "Reversioning: {pkgs} processed, {revs} reversioned, " + "{adjs} adjusted" + ).format( + pkgs=self.reversion_pkgs.pair(), + revs=self.reversion_revs.pair(), + adjs=self.reversion_adjs.items, + ) + ) - # - # Helper routines - # - def __prep_prog_json(self, msg=None, phase=None, prog_json=None): - # prepare progress json. - phase_name = self._phase_prefix() - if phase: - phase_name = phase - if prog_json: - return prog_json - else: - return {self.O_PHASE: phase_name, - self.O_MESSAGE: msg} - - def __handle_prog_output(self, prog_json, end="\n"): - # If event handler is set, report an event. Otherwise, print. - if self.__prog_event_handler: - self.__prog_event_handler(event=prog_json) - else: - self._pe.cprint(json.dumps(prog_json), end=end) - - def __generic_start(self, msg): - # In the case of listing/up-to-date check operations, we - # don't want to output planning information, so skip. - if self.purpose != self.PURPOSE_NORMAL: - return - - prog_json = self.__prep_prog_json(msg) - self.__handle_prog_output(prog_json) - # indicate that we just printed. - self._ptimer.reset_now() - - def __generic_done(self, msg=None, phase=None, prog_json=None): - # See __generic_start above. - if self.purpose != self.PURPOSE_NORMAL: - return - if msg is None: - msg = _("Done") - prog_json = self.__prep_prog_json(msg, phase, prog_json) - self.__handle_prog_output(prog_json, end='\n') - self._ptimer.reset() - - def __generic_done_item(self, item, msg=None): - # See __generic_start above. - if self.purpose != self.PURPOSE_NORMAL: - return - if msg is None: - if global_settings.client_output_verbose > 0: - msg = _("Done ({elapsed:>.3f}s)") - else: - msg = _("Done") - outmsg = msg.format(elapsed=item.elapsed()) - prog_json = self.__prep_prog_json(outmsg) - self.__handle_prog_output(prog_json, end='\n') - self._ptimer.reset() - - def _change_purpose(self, op, np): - self._ptimer.reset() - if np == self.PURPOSE_PKG_UPDATE_CHK: - prog_json = self.__prep_prog_json( - _("Checking that pkg(7) is up to date ...")) - self.__handle_prog_output(prog_json) - - def _cache_cats_output(self, outspec): - if outspec.first: - self.__generic_start(_("Caching catalogs ...")) - if outspec.last: - self.__generic_done() - - def _load_cat_cache_output(self, outspec): - if outspec.first: - self.__generic_start(_("Loading catalog cache ...")) - if outspec.last: - self.__generic_done() - - def _refresh_output_progress(self, outspec): - # See __generic_start above. 
- if self.purpose != self.PURPOSE_NORMAL: - return - if "startpublisher" in outspec.changed: - p = self.pub_refresh.curinfo.prefix - if self.refresh_target_catalog: - m = _("Retrieving target catalog '{0}' " - "...").format(p) - elif self.refresh_full_refresh: - m = _("Retrieving catalog '{0}' ...").format(p) - else: - m = _("Refreshing catalog '{0}' ...").format(p) - self.__generic_start(m) - elif "endpublisher" in outspec.changed: - self.__generic_done() - - def _plan_output(self, outspec, planitem): - if outspec.first: - self.__generic_start(_("{0} ...").format(planitem.name)) - if outspec.last: - self.__generic_done_item(planitem) - - def _plan_output_all_done(self): - prog_json = {self.O_PHASE: self._phase_prefix(), - self.O_MESSAGE: _("Planning completed"), - self.O_TIME: self.plan_generic.elapsed(), - self.O_TIME_U: _("second")} - self.__generic_done(prog_json=prog_json) - - def _mfst_fetch(self, outspec): - if not self._ptimer.time_to_print() and \ - not outspec.first and not outspec.last: - return - if self.purpose != self.PURPOSE_NORMAL: - return - - # Reset timer; this prevents double printing for - # outspec.first and then again for the timer expiration - if outspec.first: - self._ptimer.reset_now() - # - # There are a couple of reasons we might fetch manifests-- - # pkgrecv, pkglint, etc. can all do this. _phase_prefix() - # adjusts the output based on the major phase. - # - goalitems = self.mfst_fetch.goalitems - if goalitems is None: - goalitems = 0 - prog_json = {self.O_PHASE: self._phase_prefix(), - self.O_MESSAGE: _("Fetching manifests"), - self.O_PRO_ITEMS: self.mfst_fetch.items, - self.O_GOAL_ITEMS: goalitems, - self.O_PCT_DONE: int(self.mfst_fetch.pctdone()), - self.O_ITEM_U: _("manifest") - } - self.__handle_prog_output(prog_json) +class RADProgressTracker(CommandLineProgressTracker): + """This progress tracker is a subclass of CommandLineProgressTracker + which is specific for RAD progress event. + """ + + # Default to printing periodic output every 5 seconds. + TERM_DELAY = 5.0 + + # Output constants. + O_PHASE = "phase" + O_MESSAGE = "message" + O_TIME = "time_taken" + O_TIME_U = "time_unit" + O_TYPE = "type" + O_PRO_ITEMS = "processed_items" + O_GOAL_ITEMS = "goal_items" + O_PCT_DONE = "percent_done" + O_ITEM_U = "item_unit" + O_SPEED = "speed" + O_RUNNING = "running" + O_GOAL_PRO_ITEMS = "goal_processed_items" + O_REV_ITEMS = "reversioned_items" + O_GOAL_REV_ITEMS = "goal_reversion_items" + O_ADJ_ITEMS = "adjusted_items" + O_LI_OUTPUT = "li_output" + O_LI_ERROR = "li_errors" + + def __init__(self, term_delay=TERM_DELAY, prog_event_handler=None): + CommandLineProgressTracker.__init__(self, term_delay=term_delay) + self.__prog_event_handler = prog_event_handler + + def _phase_prefix(self): + if self.major_phase == self.PHASE_UTILITY: + return "Utility" + + return self.phase_names[self.major_phase] + + # + # Helper routines + # + def __prep_prog_json(self, msg=None, phase=None, prog_json=None): + # prepare progress json. 
+ phase_name = self._phase_prefix() + if phase: + phase_name = phase + if prog_json: + return prog_json + else: + return {self.O_PHASE: phase_name, self.O_MESSAGE: msg} - def _dl_output(self, outspec): - if not self._ptimer.time_to_print() and not outspec.first and \ - not outspec.last: - return - - # Reset timer; this prevents double printing for - # outspec.first and then again for the timer expiration - if outspec.first: - self._ptimer.reset_now() - - if not outspec.last: - speed = self.dl_estimator.get_speed_estimate() - else: - speed = self.dl_estimator.get_final_speed() - speedstr = "" if speed is None else \ - "({0})".format(self.dl_estimator.format_speed(speed)) - - if not outspec.last: - # 'first' or time to print - prog_json = { - self.O_PHASE: self._phase_prefix(), - self.O_MESSAGE: _("Downloading"), - self.O_PRO_ITEMS: self.dl_bytes.items, - self.O_GOAL_ITEMS: self.dl_bytes.goalitems, - self.O_PCT_DONE: int(self.dl_bytes.pctdone()), - self.O_SPEED: speedstr, - self.O_ITEM_U: _("byte") - } - self.__handle_prog_output(prog_json) - else: - # 'last' - prog_json = {self.O_PHASE: self._phase_prefix(), - self.O_MESSAGE: _("Download completed"), - self.O_PRO_ITEMS: self.dl_bytes.goalitems, - self.O_SPEED: speedstr, - self.O_ITEM_U: _("byte"), - self.O_TIME: self.dl_estimator.elapsed(), - self.O_TIME_U: _("second") - } - self.__generic_done(prog_json=prog_json) - - def _republish_output(self, outspec): - if "startpkg" in outspec.changed: - pkgfmri = self.repub_pkgs.curinfo - self.__generic_start(_("Republish: {0} ... ").format( - pkgfmri.get_fmri(anarchy=True))) - if "endpkg" in outspec.changed: - self.__generic_done() - - def _archive_output(self, outspec): - if not self._ptimer.time_to_print() and not outspec: - return - if outspec.first: - # tell ptimer that we just printed. - self._ptimer.reset_now() - - if outspec.last: - prog_json = {self.O_PHASE: self._phase_prefix(), - self.O_MESSAGE: _("Archiving completed"), - self.O_PRO_ITEMS: self.archive_bytes.goalitems, - self.O_ITEM_U: _("byte"), - self.O_TIME: self.archive_items.elapsed(), - self.O_TIME_U: _("second") - } - self.__generic_done(prog_json=prog_json) - return - - prog_json = {self.O_PHASE: self._phase_prefix(), - self.O_MESSAGE: _("Archiving"), - self.O_PRO_ITEMS: self.archive_bytes.items, - self.O_GOAL_ITEMS: self.archive_bytes.goalitems, - self.O_PCT_DONE: int(self.archive_bytes.pctdone()), - self.O_ITEM_U: _("byte") - } - self.__handle_prog_output(prog_json) + def __handle_prog_output(self, prog_json, end="\n"): + # If event handler is set, report an event. Otherwise, print. + if self.__prog_event_handler: + self.__prog_event_handler(event=prog_json) + else: + self._pe.cprint(json.dumps(prog_json), end=end) + + def __generic_start(self, msg): + # In the case of listing/up-to-date check operations, we + # don't want to output planning information, so skip. + if self.purpose != self.PURPOSE_NORMAL: + return + + prog_json = self.__prep_prog_json(msg) + self.__handle_prog_output(prog_json) + # indicate that we just printed. + self._ptimer.reset_now() + + def __generic_done(self, msg=None, phase=None, prog_json=None): + # See __generic_start above. + if self.purpose != self.PURPOSE_NORMAL: + return + if msg is None: + msg = _("Done") + prog_json = self.__prep_prog_json(msg, phase, prog_json) + self.__handle_prog_output(prog_json, end="\n") + self._ptimer.reset() + + def __generic_done_item(self, item, msg=None): + # See __generic_start above. 
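# Editor's illustration (standalone sketch; the handler and the literal
# values are assumed) of one progress event built from the O_* keys above
# and of a caller-supplied prog_event_handler consuming it; when no
# handler is set, the tracker prints the same dict via json.dumps().
import json

def handler(event):
    print(event["phase"], event["percent_done"], sep=": ")

prog_json = {
    "phase": "Download",         # O_PHASE
    "message": "Downloading",    # O_MESSAGE
    "processed_items": 1048576,  # O_PRO_ITEMS
    "goal_items": 4194304,       # O_GOAL_ITEMS
    "percent_done": 25,          # O_PCT_DONE
    "speed": "(1.2M/s)",         # O_SPEED
    "item_unit": "byte",         # O_ITEM_U
}
handler(event=prog_json)         # prints "Download: 25"
print(json.dumps(prog_json))     # what is printed when no handler is set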
+ if self.purpose != self.PURPOSE_NORMAL: + return + if msg is None: + if global_settings.client_output_verbose > 0: + msg = _("Done ({elapsed:>.3f}s)") + else: + msg = _("Done") + outmsg = msg.format(elapsed=item.elapsed()) + prog_json = self.__prep_prog_json(outmsg) + self.__handle_prog_output(prog_json, end="\n") + self._ptimer.reset() + + def _change_purpose(self, op, np): + self._ptimer.reset() + if np == self.PURPOSE_PKG_UPDATE_CHK: + prog_json = self.__prep_prog_json( + _("Checking that pkg(7) is up to date ...") + ) + self.__handle_prog_output(prog_json) + + def _cache_cats_output(self, outspec): + if outspec.first: + self.__generic_start(_("Caching catalogs ...")) + if outspec.last: + self.__generic_done() + + def _load_cat_cache_output(self, outspec): + if outspec.first: + self.__generic_start(_("Loading catalog cache ...")) + if outspec.last: + self.__generic_done() + + def _refresh_output_progress(self, outspec): + # See __generic_start above. + if self.purpose != self.PURPOSE_NORMAL: + return + if "startpublisher" in outspec.changed: + p = self.pub_refresh.curinfo.prefix + if self.refresh_target_catalog: + m = _("Retrieving target catalog '{0}' " "...").format(p) + elif self.refresh_full_refresh: + m = _("Retrieving catalog '{0}' ...").format(p) + else: + m = _("Refreshing catalog '{0}' ...").format(p) + self.__generic_start(m) + elif "endpublisher" in outspec.changed: + self.__generic_done() + + def _plan_output(self, outspec, planitem): + if outspec.first: + self.__generic_start(_("{0} ...").format(planitem.name)) + if outspec.last: + self.__generic_done_item(planitem) + + def _plan_output_all_done(self): + prog_json = { + self.O_PHASE: self._phase_prefix(), + self.O_MESSAGE: _("Planning completed"), + self.O_TIME: self.plan_generic.elapsed(), + self.O_TIME_U: _("second"), + } + self.__generic_done(prog_json=prog_json) + + def _mfst_fetch(self, outspec): + if ( + not self._ptimer.time_to_print() + and not outspec.first + and not outspec.last + ): + return + if self.purpose != self.PURPOSE_NORMAL: + return + + # Reset timer; this prevents double printing for + # outspec.first and then again for the timer expiration + if outspec.first: + self._ptimer.reset_now() # - # The progress tracking infrastructure wants to tell us about each - # kind of action activity (install, remove, update). For this - # progress tracker, we don't really care to expose that to the user, - # so we work in terms of total actions instead. + # There are a couple of reasons we might fetch manifests-- + # pkgrecv, pkglint, etc. can all do this. _phase_prefix() + # adjusts the output based on the major phase. # - def _act_output(self, outspec, actionitem): - if not self._ptimer.time_to_print() and not outspec.first: - return - # reset timer, since we're definitely printing now... 
- self._ptimer.reset_now() - total_actions = \ - sum(x.items for x in self._actionitems.values()) - total_goal = \ - sum(x.goalitems for x in self._actionitems.values()) - prog_json = {self.O_PHASE: self._phase_prefix(), - self.O_MESSAGE: _("Action activity"), - self.O_PRO_ITEMS: total_actions, - self.O_GOAL_ITEMS: total_goal, - self.O_TYPE: actionitem.name, - self.O_ITEM_U: _("action") - } - self.__handle_prog_output(prog_json) - - def _act_output_all_done(self): - total_goal = \ - sum(x.goalitems for x in self._actionitems.values()) - total_time = \ - sum(x.elapsed() for x in self._actionitems.values()) - if total_goal == 0: - return - - prog_json = {self.O_PHASE: self._phase_prefix(), - self.O_MESSAGE: _("Completed actions activities"), - self.O_PRO_ITEMS: total_goal, - self.O_ITEM_U: _("action"), - self.O_TIME: total_time, - self.O_TIME_U: _("second") - } - self.__handle_prog_output(prog_json) - - def _job_output(self, outspec, jobitem): - if outspec.first: - self.__generic_start("{0} ... ".format(jobitem.name)) - if outspec.last: - self.__generic_done_item(jobitem) - - def _lint_output(self, outspec): - if outspec.first: - if self.lint_phasetype == self.LINT_PHASETYPE_SETUP: - msg = "{0} ... ".format( - self.lintitems.name) - prog_json = {self.O_PHASE: _("Setup"), - self.O_MESSAGE: msg - } - self.__handle_prog_output(prog_json) - elif self.lint_phasetype == self.LINT_PHASETYPE_EXECUTE: - msg = "# --- {0} ---".format( - self.lintitems.name) - prog_json = {self.O_PHASE: _("Execute"), - self.O_MESSAGE: msg - } - self.__handle_prog_output(prog_json) - if outspec.last: - if self.lint_phasetype == self.LINT_PHASETYPE_SETUP: - self.__generic_done(phase=_("Setup")) - elif self.lint_phasetype == self.LINT_PHASETYPE_EXECUTE: - pass - - def _li_recurse_start_output(self): - if self.linked_pkg_op == pkgdefs.PKG_OP_PUBCHECK: - self.__generic_start( - _("Linked image publisher check ...")) - return - elif self.linked_pkg_op == pkgdefs.PKG_OP_HOTFIX_CLEANUP: - self.__generic_start( - _("Cleaning up hot-fix origins ...")) - return - - def _li_recurse_end_output(self): - if self.linked_pkg_op in [pkgdefs.PKG_OP_PUBCHECK, - pkgdefs.PKG_OP_HOTFIX_CLEANUP]: - self.__generic_done() - return - prog_json = self.__prep_prog_json( - _("Finished processing linked images.")) + goalitems = self.mfst_fetch.goalitems + if goalitems is None: + goalitems = 0 + prog_json = { + self.O_PHASE: self._phase_prefix(), + self.O_MESSAGE: _("Fetching manifests"), + self.O_PRO_ITEMS: self.mfst_fetch.items, + self.O_GOAL_ITEMS: goalitems, + self.O_PCT_DONE: int(self.mfst_fetch.pctdone()), + self.O_ITEM_U: _("manifest"), + } + self.__handle_prog_output(prog_json) + + def _dl_output(self, outspec): + if ( + not self._ptimer.time_to_print() + and not outspec.first + and not outspec.last + ): + return + + # Reset timer; this prevents double printing for + # outspec.first and then again for the timer expiration + if outspec.first: + self._ptimer.reset_now() + + if not outspec.last: + speed = self.dl_estimator.get_speed_estimate() + else: + speed = self.dl_estimator.get_final_speed() + speedstr = ( + "" + if speed is None + else "({0})".format(self.dl_estimator.format_speed(speed)) + ) + + if not outspec.last: + # 'first' or time to print + prog_json = { + self.O_PHASE: self._phase_prefix(), + self.O_MESSAGE: _("Downloading"), + self.O_PRO_ITEMS: self.dl_bytes.items, + self.O_GOAL_ITEMS: self.dl_bytes.goalitems, + self.O_PCT_DONE: int(self.dl_bytes.pctdone()), + self.O_SPEED: speedstr, + self.O_ITEM_U: _("byte"), + } + 
self.__handle_prog_output(prog_json) + else: + # 'last' + prog_json = { + self.O_PHASE: self._phase_prefix(), + self.O_MESSAGE: _("Download completed"), + self.O_PRO_ITEMS: self.dl_bytes.goalitems, + self.O_SPEED: speedstr, + self.O_ITEM_U: _("byte"), + self.O_TIME: self.dl_estimator.elapsed(), + self.O_TIME_U: _("second"), + } + self.__generic_done(prog_json=prog_json) + + def _republish_output(self, outspec): + if "startpkg" in outspec.changed: + pkgfmri = self.repub_pkgs.curinfo + self.__generic_start( + _("Republish: {0} ... ").format(pkgfmri.get_fmri(anarchy=True)) + ) + if "endpkg" in outspec.changed: + self.__generic_done() + + def _archive_output(self, outspec): + if not self._ptimer.time_to_print() and not outspec: + return + if outspec.first: + # tell ptimer that we just printed. + self._ptimer.reset_now() + + if outspec.last: + prog_json = { + self.O_PHASE: self._phase_prefix(), + self.O_MESSAGE: _("Archiving completed"), + self.O_PRO_ITEMS: self.archive_bytes.goalitems, + self.O_ITEM_U: _("byte"), + self.O_TIME: self.archive_items.elapsed(), + self.O_TIME_U: _("second"), + } + self.__generic_done(prog_json=prog_json) + return + + prog_json = { + self.O_PHASE: self._phase_prefix(), + self.O_MESSAGE: _("Archiving"), + self.O_PRO_ITEMS: self.archive_bytes.items, + self.O_GOAL_ITEMS: self.archive_bytes.goalitems, + self.O_PCT_DONE: int(self.archive_bytes.pctdone()), + self.O_ITEM_U: _("byte"), + } + self.__handle_prog_output(prog_json) + + # + # The progress tracking infrastructure wants to tell us about each + # kind of action activity (install, remove, update). For this + # progress tracker, we don't really care to expose that to the user, + # so we work in terms of total actions instead. + # + def _act_output(self, outspec, actionitem): + if not self._ptimer.time_to_print() and not outspec.first: + return + # reset timer, since we're definitely printing now... + self._ptimer.reset_now() + total_actions = sum(x.items for x in self._actionitems.values()) + total_goal = sum(x.goalitems for x in self._actionitems.values()) + prog_json = { + self.O_PHASE: self._phase_prefix(), + self.O_MESSAGE: _("Action activity"), + self.O_PRO_ITEMS: total_actions, + self.O_GOAL_ITEMS: total_goal, + self.O_TYPE: actionitem.name, + self.O_ITEM_U: _("action"), + } + self.__handle_prog_output(prog_json) + + def _act_output_all_done(self): + total_goal = sum(x.goalitems for x in self._actionitems.values()) + total_time = sum(x.elapsed() for x in self._actionitems.values()) + if total_goal == 0: + return + + prog_json = { + self.O_PHASE: self._phase_prefix(), + self.O_MESSAGE: _("Completed actions activities"), + self.O_PRO_ITEMS: total_goal, + self.O_ITEM_U: _("action"), + self.O_TIME: total_time, + self.O_TIME_U: _("second"), + } + self.__handle_prog_output(prog_json) + + def _job_output(self, outspec, jobitem): + if outspec.first: + self.__generic_start("{0} ... ".format(jobitem.name)) + if outspec.last: + self.__generic_done_item(jobitem) + + def _lint_output(self, outspec): + if outspec.first: + if self.lint_phasetype == self.LINT_PHASETYPE_SETUP: + msg = "{0} ... 
".format(self.lintitems.name) + prog_json = {self.O_PHASE: _("Setup"), self.O_MESSAGE: msg} self.__handle_prog_output(prog_json) - - def __li_dump_output(self, output): - if not output: - return [] - lines = output.splitlines() - return lines - - def _li_recurse_output_output(self, lin, stdout, stderr): - if not stdout and not stderr: - return - prog_json = {self.O_PHASE: self._phase_prefix(), - self.O_MESSAGE: _("Linked image '{0}' output:").format(lin)} - prog_json[self.O_LI_OUTPUT] = self.__li_dump_output(stdout) - prog_json[self.O_LI_ERROR] = self.__li_dump_output(stderr) + elif self.lint_phasetype == self.LINT_PHASETYPE_EXECUTE: + msg = "# --- {0} ---".format(self.lintitems.name) + prog_json = {self.O_PHASE: _("Execute"), self.O_MESSAGE: msg} self.__handle_prog_output(prog_json) + if outspec.last: + if self.lint_phasetype == self.LINT_PHASETYPE_SETUP: + self.__generic_done(phase=_("Setup")) + elif self.lint_phasetype == self.LINT_PHASETYPE_EXECUTE: + pass - def _li_recurse_status_output(self, done): - if self.linked_pkg_op in [pkgdefs.PKG_OP_PUBCHECK, - pkgdefs.PKG_OP_HOTFIX_CLEANUP]: - return - - prog_json = {self.O_PHASE: self._phase_prefix(), - self.O_MESSAGE: _("Linked images status"), - self.O_PRO_ITEMS: done, - self.O_GOAL_ITEMS: self.linked_total, - self.O_ITEM_U: _("linked image"), - self.O_RUNNING: [str(i) for i in self.linked_running] - } - - self.__handle_prog_output(prog_json) + def _li_recurse_start_output(self): + if self.linked_pkg_op == pkgdefs.PKG_OP_PUBCHECK: + self.__generic_start(_("Linked image publisher check ...")) + return + elif self.linked_pkg_op == pkgdefs.PKG_OP_HOTFIX_CLEANUP: + self.__generic_start(_("Cleaning up hot-fix origins ...")) + return + + def _li_recurse_end_output(self): + if self.linked_pkg_op in [ + pkgdefs.PKG_OP_PUBCHECK, + pkgdefs.PKG_OP_HOTFIX_CLEANUP, + ]: + self.__generic_done() + return + prog_json = self.__prep_prog_json( + _("Finished processing linked images.") + ) + self.__handle_prog_output(prog_json) + + def __li_dump_output(self, output): + if not output: + return [] + lines = output.splitlines() + return lines + + def _li_recurse_output_output(self, lin, stdout, stderr): + if not stdout and not stderr: + return + prog_json = { + self.O_PHASE: self._phase_prefix(), + self.O_MESSAGE: _("Linked image '{0}' output:").format(lin), + } + prog_json[self.O_LI_OUTPUT] = self.__li_dump_output(stdout) + prog_json[self.O_LI_ERROR] = self.__li_dump_output(stderr) + self.__handle_prog_output(prog_json) + + def _li_recurse_status_output(self, done): + if self.linked_pkg_op in [ + pkgdefs.PKG_OP_PUBCHECK, + pkgdefs.PKG_OP_HOTFIX_CLEANUP, + ]: + return + + prog_json = { + self.O_PHASE: self._phase_prefix(), + self.O_MESSAGE: _("Linked images status"), + self.O_PRO_ITEMS: done, + self.O_GOAL_ITEMS: self.linked_total, + self.O_ITEM_U: _("linked image"), + self.O_RUNNING: [str(i) for i in self.linked_running], + } - def _li_recurse_progress_output(self, lin): - if self.linked_pkg_op in [pkgdefs.PKG_OP_PUBCHECK, - pkgdefs.PKG_OP_HOTFIX_CLEANUP]: - return - - def _reversion(self, pfmri, outspec): - if not self._ptimer.time_to_print() and not outspec: - return - - if outspec.first: - # tell ptimer that we just printed. 
- self._ptimer.reset_now() - - if outspec.last: - prog_json = {self.O_PHASE: _("Reversion"), - self.O_MESSAGE: _("Done"), - self.O_PRO_ITEMS: self.reversion_pkgs.items, - self.O_REV_ITEMS: self.reversion_revs.items, - self.O_ADJ_ITEMS: self.reversion_adjs.items, - self.O_ITEM_U: _("package") - } - self.__generic_done(prog_json=prog_json) - return - - prog_json = {self.O_PHASE: _("Reversion"), - self.O_MESSAGE: "Reversioning", - self.O_PRO_ITEMS: self.reversion_pkgs.items, - self.O_GOAL_PRO_ITEMS: self.reversion_pkgs.goalitems, - self.O_REV_ITEMS: self.reversion_revs.items, - self.O_GOAL_REV_ITEMS: self.reversion_revs.goalitems, - self.O_ADJ_ITEMS: self.reversion_adjs.items, - self.O_ITEM_U: _("package") - } - self.__handle_prog_output(prog_json) + self.__handle_prog_output(prog_json) + + def _li_recurse_progress_output(self, lin): + if self.linked_pkg_op in [ + pkgdefs.PKG_OP_PUBCHECK, + pkgdefs.PKG_OP_HOTFIX_CLEANUP, + ]: + return + + def _reversion(self, pfmri, outspec): + if not self._ptimer.time_to_print() and not outspec: + return + + if outspec.first: + # tell ptimer that we just printed. + self._ptimer.reset_now() + + if outspec.last: + prog_json = { + self.O_PHASE: _("Reversion"), + self.O_MESSAGE: _("Done"), + self.O_PRO_ITEMS: self.reversion_pkgs.items, + self.O_REV_ITEMS: self.reversion_revs.items, + self.O_ADJ_ITEMS: self.reversion_adjs.items, + self.O_ITEM_U: _("package"), + } + self.__generic_done(prog_json=prog_json) + return + + prog_json = { + self.O_PHASE: _("Reversion"), + self.O_MESSAGE: "Reversioning", + self.O_PRO_ITEMS: self.reversion_pkgs.items, + self.O_GOAL_PRO_ITEMS: self.reversion_pkgs.goalitems, + self.O_REV_ITEMS: self.reversion_revs.items, + self.O_GOAL_REV_ITEMS: self.reversion_revs.goalitems, + self.O_ADJ_ITEMS: self.reversion_adjs.items, + self.O_ITEM_U: _("package"), + } + self.__handle_prog_output(prog_json) + + @classmethod + def get_json_schema(cls): + """Construct json schema.""" + + json_schema = { + "$schema": "http://json-schema.org/draft-04/schema#", + "title": "progress schema", + "type": "object", + "properties": { + cls.O_PHASE: {"type": "string"}, + cls.O_MESSAGE: {"type": "string"}, + cls.O_TIME: {"type": "number"}, + cls.O_TIME_U: {"type": "string"}, + cls.O_TYPE: {"type": "string"}, + cls.O_PRO_ITEMS: {"type": "number"}, + cls.O_GOAL_ITEMS: {"type": "number"}, + cls.O_PCT_DONE: {"type": "number"}, + cls.O_ITEM_U: {"type": "string"}, + cls.O_SPEED: {"type": "string"}, + cls.O_RUNNING: {"type": "array"}, + cls.O_GOAL_PRO_ITEMS: {"type": "number"}, + cls.O_REV_ITEMS: {"type": "number"}, + cls.O_GOAL_REV_ITEMS: {"type": "number"}, + cls.O_ADJ_ITEMS: {"type": "number"}, + cls.O_LI_OUTPUT: {"type": "array"}, + cls.O_LI_ERROR: {"type": "array"}, + }, + "required": [cls.O_PHASE, cls.O_MESSAGE], + } + return json_schema - @classmethod - def get_json_schema(cls): - """Construct json schema.""" - - json_schema = {"$schema": - "http://json-schema.org/draft-04/schema#", - "title": "progress schema", - "type": "object", - "properties": {cls.O_PHASE: {"type": "string"}, - cls.O_MESSAGE: {"type": "string"}, - cls.O_TIME: {"type": "number"}, - cls.O_TIME_U: {"type": "string"}, - cls.O_TYPE: {"type": "string"}, - cls.O_PRO_ITEMS: {"type": "number"}, - cls.O_GOAL_ITEMS: {"type": "number"}, - cls.O_PCT_DONE: {"type": "number"}, - cls.O_ITEM_U: {"type": "string"}, - cls.O_SPEED: {"type": "string"}, - cls.O_RUNNING: {"type": "array"}, - cls.O_GOAL_PRO_ITEMS: {"type": "number"}, - cls.O_REV_ITEMS : {"type": "number"}, - cls.O_GOAL_REV_ITEMS: {"type": "number"}, - 
cls.O_ADJ_ITEMS: {"type": "number"}, - cls.O_LI_OUTPUT : {"type": "array"}, - cls.O_LI_ERROR : {"type": "array"}, - }, - "required": [cls.O_PHASE, cls.O_MESSAGE] - } - return json_schema class LinkedChildProgressTracker(CommandLineProgressTracker): - """This tracker is used for recursion with linked children. - This is intended for use only by linked images.""" - - def __init__(self, output_file): - CommandLineProgressTracker.__init__(self, output_file) - - # We modify the instance such that everything except for the - # linked image methods are no-opped out. In multi-level - # recursion, this ensures that output from children is - # displayed. - - def __donothing(*args, **kwargs): - # Unused argument 'args', 'kwargs'; - # pylint: disable=W0613 - pass - - for methname in ProgressTrackerBackend.__dict__: - if methname == "__init__": - continue - if methname.startswith("_li_recurse"): - continue - boundmeth = getattr(self, methname) - if not inspect.ismethod(boundmeth): - continue - setattr(self, methname, __donothing) + """This tracker is used for recursion with linked children. + This is intended for use only by linked images.""" -class FancyUNIXProgressTracker(ProgressTracker): - """This progress tracker is designed for UNIX-like OS's-- those which - have UNIX-like terminal semantics. It attempts to load the 'curses' - package. If that or other terminal-liveness tests fail, it gives up: - the client should pick some other more suitable tracker. (Probably - CommandLineProgressTracker).""" + def __init__(self, output_file): + CommandLineProgressTracker.__init__(self, output_file) - # - # The minimum interval (in seconds) at which we should update the - # display during operations which produce a lot of output. Needed to - # avoid spamming a slow terminal. - # - TERM_DELAY = 0.10 - TERM_DELAY_SLOW = 0.25 - - def __init__(self, output_file=sys.stdout, term_delay=None): - ProgressTracker.__init__(self) + # We modify the instance such that everything except for the + # linked image methods are no-opped out. In multi-level + # recursion, this ensures that output from children is + # displayed. - try: - self._pe = printengine.POSIXPrintEngine(output_file, - ttymode=True) - except printengine.PrintEngineException as e: - raise ProgressTrackerException( - "Couldn't create print engine: {0}".format( - " ".join(e.args))) + def __donothing(*args, **kwargs): + # Unused argument 'args', 'kwargs'; + # pylint: disable=W0613 + pass - if term_delay is None: - term_delay = self.TERM_DELAY_SLOW if self._pe.isslow() \ - else self.TERM_DELAY - self._ptimer = PrintTimer(term_delay) + for methname in ProgressTrackerBackend.__dict__: + if methname == "__init__": + continue + if methname.startswith("_li_recurse"): + continue + boundmeth = getattr(self, methname) + if not inspect.ismethod(boundmeth): + continue + setattr(self, methname, __donothing) - self._phases_hdr_printed = False - self._jobs_lastjob = None - if not output_file.isatty(): - raise ProgressTrackerException( - "output_file is not a TTY") +class FancyUNIXProgressTracker(ProgressTracker): + """This progress tracker is designed for UNIX-like OS's-- those which + have UNIX-like terminal semantics. It attempts to load the 'curses' + package. If that or other terminal-liveness tests fail, it gives up: + the client should pick some other more suitable tracker. (Probably + CommandLineProgressTracker).""" + + # + # The minimum interval (in seconds) at which we should update the + # display during operations which produce a lot of output. 
Needed to + # avoid spamming a slow terminal. + # + TERM_DELAY = 0.10 + TERM_DELAY_SLOW = 0.25 + + def __init__(self, output_file=sys.stdout, term_delay=None): + ProgressTracker.__init__(self) - self.__spinner_chars = "|/-\\" + try: + self._pe = printengine.POSIXPrintEngine(output_file, ttymode=True) + except printengine.PrintEngineException as e: + raise ProgressTrackerException( + "Couldn't create print engine: {0}".format(" ".join(e.args)) + ) + + if term_delay is None: + term_delay = ( + self.TERM_DELAY_SLOW if self._pe.isslow() else self.TERM_DELAY + ) + self._ptimer = PrintTimer(term_delay) + + self._phases_hdr_printed = False + self._jobs_lastjob = None + + if not output_file.isatty(): + raise ProgressTrackerException("output_file is not a TTY") + + self.__spinner_chars = "|/-\\" + + # For linked image spinners. + self.__linked_spinners = [] + + # + # Overridden methods from ProgressTrackerBackend + # + def _output_flush(self): + self._pe.flush() + + def __generic_start(self, msg): + # Ensure the last message displayed is flushed in case the + # corresponding operation did not complete successfully. + self.__generic_done() + self._pe.cprint(msg, end="", erase=True) + + def __generic_done(self): + self._pe.cprint("", end="", erase=True) + self._ptimer.reset() + + def __generic_done_newline(self): + self._pe.cprint("") + self._ptimer.reset() + + def _spinner(self): + sp = self._ptimer.print_value % len(self.__spinner_chars) + return self.__spinner_chars[sp] + + def _up2date(self): + if not self._ptimer.time_to_print(): + return + self._pe.cprint( + _("Checking that pkg(7) is up to date {0}").format(self._spinner()), + end="", + erase=True, + ) + + # Unused argument 'op'; pylint: disable=W0613 + def _change_purpose(self, op, np): + self._ptimer.reset() + if np == self.PURPOSE_PKG_UPDATE_CHK: + self._up2date() + + def _cache_cats_output(self, outspec): + if outspec.first: + self.__generic_start(_("Caching catalogs ...")) + if outspec.last: + self.__generic_done() + + def _load_cat_cache_output(self, outspec): + if outspec.first: + self.__generic_start(_("Loading catalog cache ...")) + if outspec.last: + self.__generic_done() + + def _refresh_output_progress(self, outspec): + if self.purpose == self.PURPOSE_PKG_UPDATE_CHK: + self._up2date() + return + if self._ptimer.time_to_print() and not outspec: + return + + # for very small xfers (like when we just get the attrs) this + # isn't very interesting, so elide it. + if self.pub_refresh_bytes.items <= 32 * 1024: + nbytes = "" + else: + nbytes = " " + misc.bytes_to_str(self.pub_refresh_bytes.items) - # For linked image spinners. 
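# Editor's illustration (standalone sketch; Backend and Child are
# stand-ins, not pkg classes) of the rebinding trick used by
# LinkedChildProgressTracker above: walk the backend class's attributes
# and rebind every bound method, except the ones to keep, to a
# per-instance no-op.
import inspect

class Backend:
    def output(self):
        return "output"

    def recurse_status(self):
        return "recurse_status"

class Child(Backend):
    def __init__(self):
        def donothing(*args, **kwargs):
            pass

        for methname in Backend.__dict__:
            if methname == "__init__" or methname.startswith("recurse"):
                continue
            if not inspect.ismethod(getattr(self, methname)):
                continue
            setattr(self, methname, donothing)

c = Child()
print(c.output())          # None: no-opped on the instance
print(c.recurse_status())  # "recurse_status": left intact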
- self.__linked_spinners = [] + if self.refresh_target_catalog: + prefix = _("Retrieving target catalog") + elif self.refresh_full_refresh: + prefix = _("Retrieving catalog") + else: + prefix = _("Refreshing catalog") + msg = _("{prefix} {pub_cnt} {publisher}{bytes}").format( + prefix=prefix, + pub_cnt=self.pub_refresh.pairplus1(), + publisher=self.pub_refresh.curinfo, + bytes=nbytes, + ) + + self._pe.cprint(msg, end="", erase=True) + if outspec.last: + self.__generic_done() + + def _plan_output(self, outspec, planitem): + if self.purpose == self.PURPOSE_PKG_UPDATE_CHK: + self._up2date() + return + if outspec.first: + self.__generic_start("") + if not self._ptimer.time_to_print() and not outspec: + return + + extra_info = "" + if isinstance(planitem, GoalTrackerItem): + extra_info = ": {0}".format(planitem.pair()) + msg = _("Creating Plan ({name}{info}): {spinner}").format( + name=planitem.name, info=extra_info, spinner=self._spinner() + ) + self._pe.cprint(msg, sep="", end="", erase=True) + + def _plan_output_all_done(self): + self.__generic_done() + + def _mfst_fetch(self, outspec): + if self.purpose == self.PURPOSE_PKG_UPDATE_CHK: + self._up2date() + return + if outspec.first: + self.__generic_start("") + if not self._ptimer.time_to_print() and not outspec: + return # - # Overridden methods from ProgressTrackerBackend + # There are a couple of reasons we might fetch manifests-- + # pkgrecv, pkglint, etc. can all do this. So we adjust + # the output based on the major mode. # - def _output_flush(self): - self._pe.flush() - - def __generic_start(self, msg): - # Ensure the last message displayed is flushed in case the - # corresponding operation did not complete successfully. - self.__generic_done() - self._pe.cprint(msg, end='', erase=True) - - def __generic_done(self): - self._pe.cprint("", end='', erase=True) - self._ptimer.reset() - - def __generic_done_newline(self): - self._pe.cprint("") - self._ptimer.reset() - - def _spinner(self): - sp = self._ptimer.print_value % len(self.__spinner_chars) - return self.__spinner_chars[sp] - - def _up2date(self): - if not self._ptimer.time_to_print(): - return - self._pe.cprint( - _("Checking that pkg(7) is up to date {0}").format( - self._spinner()), end='', erase=True) - - # Unused argument 'op'; pylint: disable=W0613 - def _change_purpose(self, op, np): - self._ptimer.reset() - if np == self.PURPOSE_PKG_UPDATE_CHK: - self._up2date() - - def _cache_cats_output(self, outspec): - if outspec.first: - self.__generic_start(_("Caching catalogs ...")) - if outspec.last: - self.__generic_done() - - def _load_cat_cache_output(self, outspec): - if outspec.first: - self.__generic_start(_("Loading catalog cache ...")) - if outspec.last: - self.__generic_done() - - def _refresh_output_progress(self, outspec): - if self.purpose == self.PURPOSE_PKG_UPDATE_CHK: - self._up2date() - return - if self._ptimer.time_to_print() and not outspec: - return - - # for very small xfers (like when we just get the attrs) this - # isn't very interesting, so elide it. 
- if self.pub_refresh_bytes.items <= 32 * 1024: - nbytes = "" - else: - nbytes = " " + \ - misc.bytes_to_str(self.pub_refresh_bytes.items) - - if self.refresh_target_catalog: - prefix = _("Retrieving target catalog") - elif self.refresh_full_refresh: - prefix = _("Retrieving catalog") - else: - prefix = _("Refreshing catalog") - msg = _("{prefix} {pub_cnt} {publisher}{bytes}").format( - prefix=prefix, - pub_cnt=self.pub_refresh.pairplus1(), - publisher=self.pub_refresh.curinfo, - bytes=nbytes) - - self._pe.cprint(msg, end="", erase=True) - if outspec.last: - self.__generic_done() - - def _plan_output(self, outspec, planitem): - if self.purpose == self.PURPOSE_PKG_UPDATE_CHK: - self._up2date() - return - if outspec.first: - self.__generic_start("") - if not self._ptimer.time_to_print() and not outspec: - return - - extra_info = "" - if isinstance(planitem, GoalTrackerItem): - extra_info = ": {0}".format(planitem.pair()) - msg = _("Creating Plan ({name}{info}): {spinner}").format( - name=planitem.name, - info=extra_info, - spinner=self._spinner()) - self._pe.cprint(msg, sep='', end='', erase=True) - - def _plan_output_all_done(self): - self.__generic_done() - - def _mfst_fetch(self, outspec): - if self.purpose == self.PURPOSE_PKG_UPDATE_CHK: - self._up2date() - return - if outspec.first: - self.__generic_start("") - if not self._ptimer.time_to_print() and not outspec: - return + if self.major_phase == self.PHASE_PLAN: + msg = _("Creating Plan ({name} {pair}) " "{spinner}").format( + name=self.mfst_fetch.name, + pair=self.mfst_fetch.pair(), + spinner=self._spinner(), + ) + if self.major_phase == self.PHASE_UTILITY: + # note to translators: the position of these strings + # should probably be left alone, as they form part of + # the progress output text. + msg = _("{name} ({fetchpair}) {spinchar}").format( + name=self.mfst_fetch.name, + fetchpair=self.mfst_fetch.pair(), + spinchar=self._spinner(), + ) + self._pe.cprint(msg, sep="", end="", erase=True) + + if outspec.last: + self.__generic_done() + + def _reversion(self, pfmri, outspec): + if not self._ptimer.time_to_print() and not outspec: + return + + if isinstance(pfmri, pkg.fmri.PkgFmri): + stem = pfmri.get_pkg_stem(anarchy=True) + else: + stem = pfmri + + # The first time, emit header. + if outspec.first: + self._pe.cprint( + "{0:38} {1:>13} {2:>13} {3:>11}".format( + _("PKG"), _("Processed"), _("Reversioned"), _("Adjusted") + ) + ) + + s = "{0:<40.40} {1:>11} {2:>13} {3:>11}".format( + stem, + self.reversion_pkgs.pair(), + self.reversion_revs.pair(), + self.reversion_adjs.items, + ) + self._pe.cprint(s, end="", erase=True) + + if outspec.last: + self.__generic_done_newline() + + def _mfst_commit(self, outspec): + if self.purpose == self.PURPOSE_PKG_UPDATE_CHK: + self._up2date() + return + if not self._ptimer.time_to_print(): + return + if self.major_phase == self.PHASE_PLAN: + msg = _("Creating Plan (Committing Manifests): " "{0}").format( + self._spinner() + ) + if self.major_phase == self.PHASE_UTILITY: + msg = _("Committing Manifests {0}").format(self._spinner()) + self._pe.cprint(msg, sep="", end="", erase=True) + return - # - # There are a couple of reasons we might fetch manifests-- - # pkgrecv, pkglint, etc. can all do this. So we adjust - # the output based on the major mode. 
- # - if self.major_phase == self.PHASE_PLAN: - msg = _("Creating Plan ({name} {pair}) " - "{spinner}").format( - name=self.mfst_fetch.name, - pair=self.mfst_fetch.pair(), - spinner=self._spinner()) - if self.major_phase == self.PHASE_UTILITY: - # note to translators: the position of these strings - # should probably be left alone, as they form part of - # the progress output text. - msg = _("{name} ({fetchpair}) {spinchar}").format( - name=self.mfst_fetch.name, - fetchpair=self.mfst_fetch.pair(), - spinchar=self._spinner()) - self._pe.cprint(msg, sep='', end='', erase=True) - - if outspec.last: - self.__generic_done() - - def _reversion(self, pfmri, outspec): - - if not self._ptimer.time_to_print() and not outspec: - return - - if isinstance(pfmri, pkg.fmri.PkgFmri): - stem = pfmri.get_pkg_stem(anarchy=True) - else: - stem = pfmri - - # The first time, emit header. - if outspec.first: - self._pe.cprint("{0:38} {1:>13} {2:>13} {3:>11}".format( - _("PKG"), _("Processed"), _("Reversioned"), - _("Adjusted"))) - - s = "{0:<40.40} {1:>11} {2:>13} {3:>11}".format( - stem, self.reversion_pkgs.pair(), - self.reversion_revs.pair(), self.reversion_adjs.items) - self._pe.cprint(s, end='', erase=True) - - if outspec.last: - self.__generic_done_newline() - - def _mfst_commit(self, outspec): - if self.purpose == self.PURPOSE_PKG_UPDATE_CHK: - self._up2date() - return - if not self._ptimer.time_to_print(): - return - if self.major_phase == self.PHASE_PLAN: - msg = _("Creating Plan (Committing Manifests): " - "{0}").format(self._spinner()) - if self.major_phase == self.PHASE_UTILITY: - msg = _("Committing Manifests {0}").format( - self._spinner()) - self._pe.cprint(msg, sep='', end='', erase=True) - return - - def _repo_ver_output(self, outspec, repository_scan=False): - """If 'repository_scan' is set and we have no FRMRI set, we emit - a message saying that we're performing a scan if the repository. - If we have no FMRI, we emit a message saying we don't know what - package we're looking at. - - """ - if not self.repo_ver_pkgs.curinfo: - if repository_scan: - pkg_stem = _("Scanning repository " - "(this could take some time)") - else: - pkg_stem = _("Unknown package") - else: - pkg_stem = self.repo_ver_pkgs.curinfo.get_pkg_stem() - if not self._ptimer.time_to_print() and not outspec: - return - if "endpkg" in outspec.changed: - self._pe.cprint("", end='', erase=True) - return - s = "{0:64} {1} {2}".format( - pkg_stem, self.repo_ver_pkgs.pair(), self._spinner()) - self._pe.cprint(s, end='', erase=True) - - def _repo_ver_output_error(self, errors): - self._output_flush() - self._pe.cprint(errors) - - def _repo_fix_output(self, outspec): - s = "{0}".format( self._spinner()) - self._pe.cprint(s, end='', erase=True) - - def _repo_fix_output_error(self, errors): - self._output_flush() - self._pe.cprint(errors) - - def _repo_fix_output_info(self, info): - self._output_flush() - self._pe.cprint(info) - - def _archive_output(self, outspec): - if not self._ptimer.time_to_print() and not outspec: - return - - # The first time, emit header. 
-        if outspec.first:
-            self._pe.cprint("{0:44} {1:>13} {2:>11}".format(
-                _("ARCHIVE"), _("FILES"), _("STORE (MB)")))
-
-        mbs = format_pair("{0:.1f}", self.archive_bytes.items,
-            self.archive_bytes.goalitems, scale=(1024 * 1024),
-            targetwidth=5, format2="{0:d}")
-        s = "{0:<44.44} {1:>11} {2:>11}".format(
-            self._archive_name, self.archive_items.pair(), mbs)
-        self._pe.cprint(s, end='', erase=True)
-
-        if outspec.last:
-            self.__generic_done_newline()
-
-    def _dl_output(self, outspec):
-        if not self._ptimer.time_to_print() and not outspec.first \
-            and not outspec.last and not "startpkg" in outspec.changed \
-            and not "endpkg" in outspec.changed:
-            return
-
-        # The first time, emit header.
-        if outspec.first:
-            self._pe.cprint("{0:34} {1:>9} {2:>13} {3:>12} "
-                "{4:>7}".format(_("DOWNLOAD"), _("PKGS"),
-                _("FILES"), _("XFER (MB)"), _("SPEED")))
-
-        if outspec.last:
-            pkg_name = _("Completed")
-            speed = self.dl_estimator.get_final_speed()
-        else:
-            pkg_name = self.dl_pkgs.curinfo.get_name()
-            speed = self.dl_estimator.get_speed_estimate()
-        if len(pkg_name) > 34:
-            pkg_name = "..." + pkg_name[-30:]
-        # show speed if greater than 0, otherwise "--"
-        if speed is not None and speed > 0:
-            speedstr = self.dl_estimator.format_speed(speed)
-        else:
-            speedstr = "--"
-
-        # Use floats unless it makes the field too wide
-        mbstr = format_pair("{0:.1f}", self.dl_bytes.items,
-            self.dl_bytes.goalitems, scale=1024.0 * 1024.0,
-            targetwidth=5, format2="{0:d}")
-        s = "{0:<34.34} {1:>9} {2:>13} {3:>12} {4:>7}".format(
-            pkg_name, self.dl_pkgs.pair(), self.dl_files.pair(),
-            mbstr, speedstr)
-        self._pe.cprint(s, end='', erase=True)
-
-        if outspec.last:
-            self.__generic_done_newline()
-            self.__generic_done_newline()
-
-    def _republish_output(self, outspec):
-        if not outspec.first and not outspec.last \
-            and not self._ptimer.time_to_print():
-            return
-
-        # The first time, emit header.
-        if outspec.first:
-            self._pe.cprint("{0:40} {1:>12} {2:>11} {3:>11}".format(
-                _("PROCESS"), _("ITEMS"), _("GET (MB)"),
-                _("SEND (MB)")))
-
-        if outspec.last:
-            pkg_name = "Completed"
-        else:
-            pkg_name = self.repub_pkgs.curinfo.get_name()
-        if len(pkg_name) > 40:
-            pkg_name = "..." + pkg_name[-37:]
-
-        s = "{0:<40.40} {1:>12} {2:>11} {3:>11}".format(
-            pkg_name, self.repub_pkgs.pair(),
-            format_pair("{0:.1f}", self.dl_bytes.items,
-            self.dl_bytes.goalitems, scale=(1024 * 1024),
-            targetwidth=5, format2="{0:d}"),
-            format_pair("{0:.1f}", self.repub_send_bytes.items,
-            self.repub_send_bytes.goalitems, scale=(1024 * 1024),
-            targetwidth=5, format2="{0:d}"))
-        self._pe.cprint(s, erase=True, end='')
-
-        if outspec.last:
-            self.__generic_done_newline()
-            self.__generic_done_newline()
-
-    def _print_phases_hdr(self):
-        if self._phases_hdr_printed:
-            return
-        self._pe.cprint("{0:40} {1:>11}".format(_("PHASE"), _("ITEMS")))
-        self._phases_hdr_printed = True
-
-    def _act_output(self, outspec, actionitem):
-        if actionitem.goalitems == 0:
-            return
-        # emit header if needed
-        if outspec.first:
-            self._print_phases_hdr()
-
-        if not self._ptimer.time_to_print() and \
-            not outspec.last and not outspec.first:
-            return
-        self._pe.cprint("{0:40} {1:>11}".format(
-            actionitem.name, actionitem.pair()), end='', erase=True)
-        if outspec.last:
-            self.__generic_done_newline()
-
-    def _act_output_all_done(self):
-        pass
+    def _repo_ver_output(self, outspec, repository_scan=False):
+        """If 'repository_scan' is set and we have no FMRI set, we emit
+        a message saying that we're performing a scan of the repository.
+ If we have no FMRI, we emit a message saying we don't know what + package we're looking at. - def _job_output(self, outspec, jobitem): - if not self._ptimer.time_to_print() and not outspec and \ - jobitem == self._jobs_lastjob: - return - self._jobs_lastjob = jobitem - - # emit phases header if needed - if outspec.first: - self._print_phases_hdr() - - spin = "" if outspec.last else self._spinner() - if isinstance(jobitem, GoalTrackerItem): - val = jobitem.pair() - else: - val = _("Done") if outspec.last else _("working") - - self._pe.cprint("{0:40} {1:>11} {2}".format(jobitem.name, - val, spin), end='', erase=True) - if outspec.last: - self.__generic_done_newline() - - def _lint_output(self, outspec): - if not self._ptimer.time_to_print() and not outspec.last: - return - self._pe.cprint("{0:40} {1:>11}".format(self.lintitems.name, - self.lintitems.pair()), end='', erase=True) - if outspec.last: - self.__generic_done() - - def _li_recurse_start_output(self): - if self.linked_pkg_op == pkgdefs.PKG_OP_PUBCHECK: - self.__generic_start( - _("Linked image publisher check")) - return - elif self.linked_pkg_op == pkgdefs.PKG_OP_HOTFIX_CLEANUP: - self.__generic_start( - _("Cleaning up hot-fix origins ...")) - return - - def _li_recurse_end_output(self): - if self.linked_pkg_op in [pkgdefs.PKG_OP_PUBCHECK, - pkgdefs.PKG_OP_HOTFIX_CLEANUP]: - return - msg = _("{phasename} linked: {numdone} done").format( - phasename=self.li_phase_names[self.major_phase], - numdone=format_pair("{0:d}", self.linked_total, - self.linked_total)) - self._pe.cprint(msg, erase=True) - - def __li_dump_output(self, output): - if not output: - return - lines = output.splitlines() - nlines = len(lines) - for linenum, line in enumerate(lines): - line = misc.force_str(line) - if linenum < nlines - 1: - self._pe.cprint("| " + line) - else: - if lines[linenum].strip() != "": - self._pe.cprint("| " + line) - self._pe.cprint("`") - - def _li_recurse_output_output(self, lin, stdout, stderr): - if not stdout and not stderr: - self._pe.cprint("", erase=True, end='') - return - - self._pe.cprint(_("Linked image '{0}' output:").format(lin), - erase=True) - self.__li_dump_output(stdout) - self.__li_dump_output(stderr) - - def _li_recurse_status_output(self, done): - if self.linked_pkg_op in [pkgdefs.PKG_OP_PUBCHECK, - pkgdefs.PKG_OP_HOTFIX_CLEANUP]: - return - - assert self.major_phase in self.li_phase_names, self.major_phase - - running = " ".join([str(i) for i in self.linked_running]) - msg = _("{phase} linked: {numdone} done; " - "{numworking:d} working: {running}").format( - phase=self.li_phase_names[self.major_phase], - numdone=format_pair("{0:d}", done, self.linked_total), - numworking=len(self.linked_running), - running=running) - self._pe.cprint(msg, erase=True) - - self.__linked_spinners = list( - itertools.repeat(0, len(self.linked_running))) - - def _li_recurse_progress_output(self, lin): - if self.linked_pkg_op in [pkgdefs.PKG_OP_PUBCHECK, - pkgdefs.PKG_OP_HOTFIX_CLEANUP]: - return - if not self._ptimer.time_to_print(): - return - # find the index of the child that made progress - i = self.linked_running.index(lin) - - # update that child's spinner - self.__linked_spinners[i] = \ - (self.__linked_spinners[i] + 1) % len(self.__spinner_chars) - spinners = "".join([ - self.__spinner_chars[i] - for i in self.__linked_spinners - ]) - self._pe.cprint(_("Linked progress: {0}").format(spinners), - end='', erase=True) + """ + if not self.repo_ver_pkgs.curinfo: + if repository_scan: + pkg_stem = _( + "Scanning repository " "(this could 
take some time)" + ) + else: + pkg_stem = _("Unknown package") + else: + pkg_stem = self.repo_ver_pkgs.curinfo.get_pkg_stem() + if not self._ptimer.time_to_print() and not outspec: + return + if "endpkg" in outspec.changed: + self._pe.cprint("", end="", erase=True) + return + s = "{0:64} {1} {2}".format( + pkg_stem, self.repo_ver_pkgs.pair(), self._spinner() + ) + self._pe.cprint(s, end="", erase=True) + + def _repo_ver_output_error(self, errors): + self._output_flush() + self._pe.cprint(errors) + + def _repo_fix_output(self, outspec): + s = "{0}".format(self._spinner()) + self._pe.cprint(s, end="", erase=True) + + def _repo_fix_output_error(self, errors): + self._output_flush() + self._pe.cprint(errors) + + def _repo_fix_output_info(self, info): + self._output_flush() + self._pe.cprint(info) + + def _archive_output(self, outspec): + if not self._ptimer.time_to_print() and not outspec: + return + + # The first time, emit header. + if outspec.first: + self._pe.cprint( + "{0:44} {1:>13} {2:>11}".format( + _("ARCHIVE"), _("FILES"), _("STORE (MB)") + ) + ) + + mbs = format_pair( + "{0:.1f}", + self.archive_bytes.items, + self.archive_bytes.goalitems, + scale=(1024 * 1024), + targetwidth=5, + format2="{0:d}", + ) + s = "{0:<44.44} {1:>11} {2:>11}".format( + self._archive_name, self.archive_items.pair(), mbs + ) + self._pe.cprint(s, end="", erase=True) + + if outspec.last: + self.__generic_done_newline() + + def _dl_output(self, outspec): + if ( + not self._ptimer.time_to_print() + and not outspec.first + and not outspec.last + and not "startpkg" in outspec.changed + and not "endpkg" in outspec.changed + ): + return + + # The first time, emit header. + if outspec.first: + self._pe.cprint( + "{0:34} {1:>9} {2:>13} {3:>12} " + "{4:>7}".format( + _("DOWNLOAD"), + _("PKGS"), + _("FILES"), + _("XFER (MB)"), + _("SPEED"), + ) + ) + + if outspec.last: + pkg_name = _("Completed") + speed = self.dl_estimator.get_final_speed() + else: + pkg_name = self.dl_pkgs.curinfo.get_name() + speed = self.dl_estimator.get_speed_estimate() + if len(pkg_name) > 34: + pkg_name = "..." + pkg_name[-30:] + # show speed if greater than 0, otherwise "--" + if speed is not None and speed > 0: + speedstr = self.dl_estimator.format_speed(speed) + else: + speedstr = "--" + + # Use floats unless it makes the field too wide + mbstr = format_pair( + "{0:.1f}", + self.dl_bytes.items, + self.dl_bytes.goalitems, + scale=1024.0 * 1024.0, + targetwidth=5, + format2="{0:d}", + ) + s = "{0:<34.34} {1:>9} {2:>13} {3:>12} {4:>7}".format( + pkg_name, self.dl_pkgs.pair(), self.dl_files.pair(), mbstr, speedstr + ) + self._pe.cprint(s, end="", erase=True) + + if outspec.last: + self.__generic_done_newline() + self.__generic_done_newline() + + def _republish_output(self, outspec): + if ( + not outspec.first + and not outspec.last + and not self._ptimer.time_to_print() + ): + return + + # The first time, emit header. + if outspec.first: + self._pe.cprint( + "{0:40} {1:>12} {2:>11} {3:>11}".format( + _("PROCESS"), _("ITEMS"), _("GET (MB)"), _("SEND (MB)") + ) + ) + + if outspec.last: + pkg_name = "Completed" + else: + pkg_name = self.repub_pkgs.curinfo.get_name() + if len(pkg_name) > 40: + pkg_name = "..." 
+ pkg_name[-37:] + + s = "{0:<40.40} {1:>12} {2:>11} {3:>11}".format( + pkg_name, + self.repub_pkgs.pair(), + format_pair( + "{0:.1f}", + self.dl_bytes.items, + self.dl_bytes.goalitems, + scale=(1024 * 1024), + targetwidth=5, + format2="{0:d}", + ), + format_pair( + "{0:.1f}", + self.repub_send_bytes.items, + self.repub_send_bytes.goalitems, + scale=(1024 * 1024), + targetwidth=5, + format2="{0:d}", + ), + ) + self._pe.cprint(s, erase=True, end="") + + if outspec.last: + self.__generic_done_newline() + self.__generic_done_newline() + + def _print_phases_hdr(self): + if self._phases_hdr_printed: + return + self._pe.cprint("{0:40} {1:>11}".format(_("PHASE"), _("ITEMS"))) + self._phases_hdr_printed = True + + def _act_output(self, outspec, actionitem): + if actionitem.goalitems == 0: + return + # emit header if needed + if outspec.first: + self._print_phases_hdr() + + if ( + not self._ptimer.time_to_print() + and not outspec.last + and not outspec.first + ): + return + self._pe.cprint( + "{0:40} {1:>11}".format(actionitem.name, actionitem.pair()), + end="", + erase=True, + ) + if outspec.last: + self.__generic_done_newline() + + def _act_output_all_done(self): + pass + + def _job_output(self, outspec, jobitem): + if ( + not self._ptimer.time_to_print() + and not outspec + and jobitem == self._jobs_lastjob + ): + return + self._jobs_lastjob = jobitem + + # emit phases header if needed + if outspec.first: + self._print_phases_hdr() + + spin = "" if outspec.last else self._spinner() + if isinstance(jobitem, GoalTrackerItem): + val = jobitem.pair() + else: + val = _("Done") if outspec.last else _("working") + + self._pe.cprint( + "{0:40} {1:>11} {2}".format(jobitem.name, val, spin), + end="", + erase=True, + ) + if outspec.last: + self.__generic_done_newline() + + def _lint_output(self, outspec): + if not self._ptimer.time_to_print() and not outspec.last: + return + self._pe.cprint( + "{0:40} {1:>11}".format(self.lintitems.name, self.lintitems.pair()), + end="", + erase=True, + ) + if outspec.last: + self.__generic_done() + + def _li_recurse_start_output(self): + if self.linked_pkg_op == pkgdefs.PKG_OP_PUBCHECK: + self.__generic_start(_("Linked image publisher check")) + return + elif self.linked_pkg_op == pkgdefs.PKG_OP_HOTFIX_CLEANUP: + self.__generic_start(_("Cleaning up hot-fix origins ...")) + return + + def _li_recurse_end_output(self): + if self.linked_pkg_op in [ + pkgdefs.PKG_OP_PUBCHECK, + pkgdefs.PKG_OP_HOTFIX_CLEANUP, + ]: + return + msg = _("{phasename} linked: {numdone} done").format( + phasename=self.li_phase_names[self.major_phase], + numdone=format_pair("{0:d}", self.linked_total, self.linked_total), + ) + self._pe.cprint(msg, erase=True) + + def __li_dump_output(self, output): + if not output: + return + lines = output.splitlines() + nlines = len(lines) + for linenum, line in enumerate(lines): + line = misc.force_str(line) + if linenum < nlines - 1: + self._pe.cprint("| " + line) + else: + if lines[linenum].strip() != "": + self._pe.cprint("| " + line) + self._pe.cprint("`") + + def _li_recurse_output_output(self, lin, stdout, stderr): + if not stdout and not stderr: + self._pe.cprint("", erase=True, end="") + return + + self._pe.cprint(_("Linked image '{0}' output:").format(lin), erase=True) + self.__li_dump_output(stdout) + self.__li_dump_output(stderr) + + def _li_recurse_status_output(self, done): + if self.linked_pkg_op in [ + pkgdefs.PKG_OP_PUBCHECK, + pkgdefs.PKG_OP_HOTFIX_CLEANUP, + ]: + return + + assert self.major_phase in self.li_phase_names, self.major_phase + + 
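# --- Editor's illustration (not part of the patch) ---------------------
# The reformatted output methods above all rely on fixed-width
# str.format() specs so that data rows stay aligned under their headers.
# A minimal, standalone sketch with made-up values shows the idea; the
# "<34.34" spec both pads and truncates the package name so long names
# cannot push the numeric columns out of alignment:
hdr = "{0:34} {1:>9} {2:>13} {3:>12} {4:>7}".format(
    "DOWNLOAD", "PKGS", "FILES", "XFER (MB)", "SPEED"
)
row = "{0:<34.34} {1:>9} {2:>13} {3:>12} {4:>7}".format(
    "a-very-very-long-package-name-which-will-have-to-be-truncated",
    "3/5", "120/250", "1.2/34.5", "2.1M/s"
)
print(hdr)
print(row)
# Both lines come out at 79 columns or fewer, so they fit a standard
# 80-column terminal; the sample figures here are invented for the
# illustration and do not come from the code being reformatted.
# -----------------------------------------------------------------------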
running = " ".join([str(i) for i in self.linked_running]) + msg = _( + "{phase} linked: {numdone} done; " + "{numworking:d} working: {running}" + ).format( + phase=self.li_phase_names[self.major_phase], + numdone=format_pair("{0:d}", done, self.linked_total), + numworking=len(self.linked_running), + running=running, + ) + self._pe.cprint(msg, erase=True) + + self.__linked_spinners = list( + itertools.repeat(0, len(self.linked_running)) + ) + + def _li_recurse_progress_output(self, lin): + if self.linked_pkg_op in [ + pkgdefs.PKG_OP_PUBCHECK, + pkgdefs.PKG_OP_HOTFIX_CLEANUP, + ]: + return + if not self._ptimer.time_to_print(): + return + # find the index of the child that made progress + i = self.linked_running.index(lin) + + # update that child's spinner + self.__linked_spinners[i] = (self.__linked_spinners[i] + 1) % len( + self.__spinner_chars + ) + spinners = "".join( + [self.__spinner_chars[i] for i in self.__linked_spinners] + ) + self._pe.cprint( + _("Linked progress: {0}").format(spinners), end="", erase=True + ) # @@ -3191,219 +3428,223 @@ def _li_recurse_progress_output(self, lin): # utility. # def test_progress_tracker(t, gofast=False): - # Unused variables (in several loops) pylint: disable=W0612 - import random - - print("Use ctrl-c to skip sections") - - if not gofast: - fast = 1.0 - else: - fast = 0.10 - - global_settings.client_output_verbose = 1 - - dlscript = { - "chrysler/lebaron": [], - "mazda/mx-5": [], - "acura/tsx": [], - "honda/civic-si": [], - "a-very-very-long-package-name-which-will-have-to-be-truncated": [], - } - - for purp in [ProgressTracker.PURPOSE_PKG_UPDATE_CHK, - ProgressTracker.PURPOSE_NORMAL]: - t.set_purpose(purp) - try: - t.refresh_start(4, full_refresh=False) - for x in ["woop", "gub", "zip", "yowee"]: - p = publisher.Publisher(x) - t.refresh_start_pub(p) - time.sleep(0.10 * fast) - t.refresh_progress(x, 1024 * 8) - time.sleep(0.10 * fast) - t.refresh_progress(x, 0) - time.sleep(0.10 * fast) - t.refresh_progress(x, 1024 * 128) - t.refresh_end_pub(p) - t.refresh_done() - - t.cache_catalogs_start() - time.sleep(0.25 * fast) - t.cache_catalogs_done() - - t.load_catalog_cache_start() - time.sleep(0.25 * fast) - t.load_catalog_cache_done() - - except KeyboardInterrupt: - t.flush() - - try: - t.set_major_phase(t.PHASE_PLAN) - planids = sorted([v for k, v in - ProgressTrackerFrontend.__dict__.items() - if k.startswith("PLAN_")]) - t.plan_all_start() - for planid in planids: - r = random.randint(20, 100) - # we always try to set a goal; this will fail - # for ungoaled items, so then we try again - # without a goal. This saves us the complicated - # task of inspecting the tracker-- and - # multiprogress makes such inspection much - # harder. 
- try: - t.plan_start(planid, goal=r) - except RuntimeError: - t.plan_start(planid) - for x in range(0, r): - t.plan_add_progress(planid) - time.sleep(0.02 * fast) - t.plan_done(planid) - t.plan_all_done() - except KeyboardInterrupt: - t.flush() - - try: - t.manifest_fetch_start(len(dlscript)) - for pkgnm in dlscript: - t.manifest_fetch_progress( - completion=False) - time.sleep(0.05 * fast) - t.manifest_fetch_progress( - completion=True) - time.sleep(0.05 * fast) - for pkgnm in dlscript: - t.manifest_commit() - time.sleep(0.05 * fast) - t.manifest_fetch_done() - except KeyboardInterrupt: - t.flush() - - perpkgfiles = 50 - pkggoalfiles = len(dlscript) * perpkgfiles - pkggoalbytes = 0 - filesizemax = 250000 - hunkmax = 8192 - approx_time = 5.0 * fast # how long we want the dl to take - # invent a list of random download chunks. - for pkgname, filelist in six.iteritems(dlscript): - for f in range(0, perpkgfiles): - filesize = random.randint(0, filesizemax) - hunks = [] - while filesize > 0: - delta = min(filesize, - random.randint(0, hunkmax)) - hunks.append(delta) - filesize -= delta - pkggoalbytes += delta - filelist.append(hunks) - - # pylint is picky about this message: - # old-division; pylint: disable=W1619 - pauseperfile = approx_time / pkggoalfiles - + # Unused variables (in several loops) pylint: disable=W0612 + import random + + print("Use ctrl-c to skip sections") + + if not gofast: + fast = 1.0 + else: + fast = 0.10 + + global_settings.client_output_verbose = 1 + + dlscript = { + "chrysler/lebaron": [], + "mazda/mx-5": [], + "acura/tsx": [], + "honda/civic-si": [], + "a-very-very-long-package-name-which-will-have-to-be-truncated": [], + } + + for purp in [ + ProgressTracker.PURPOSE_PKG_UPDATE_CHK, + ProgressTracker.PURPOSE_NORMAL, + ]: + t.set_purpose(purp) try: - t.download_set_goal(len(dlscript), pkggoalfiles, pkggoalbytes) - n = 0 - for pkgname, pkgfiles in six.iteritems(dlscript): - fmri = pkg.fmri.PkgFmri(pkgname) - t.download_start_pkg(fmri) - for pkgfile in pkgfiles: - for hunk in pkgfile: - t.download_add_progress(0, hunk) - t.download_add_progress(1, 0) - time.sleep(pauseperfile) - t.download_end_pkg(fmri) - t.download_done() - except KeyboardInterrupt: - t.flush() + t.refresh_start(4, full_refresh=False) + for x in ["woop", "gub", "zip", "yowee"]: + p = publisher.Publisher(x) + t.refresh_start_pub(p) + time.sleep(0.10 * fast) + t.refresh_progress(x, 1024 * 8) + time.sleep(0.10 * fast) + t.refresh_progress(x, 0) + time.sleep(0.10 * fast) + t.refresh_progress(x, 1024 * 128) + t.refresh_end_pub(p) + t.refresh_done() + + t.cache_catalogs_start() + time.sleep(0.25 * fast) + t.cache_catalogs_done() + + t.load_catalog_cache_start() + time.sleep(0.25 * fast) + t.load_catalog_cache_done() - try: - t.reset_download() - t.republish_set_goal(len(dlscript), pkggoalbytes, pkggoalbytes) - n = 0 - for pkgname, pkgfiles in six.iteritems(dlscript): - fmri = pkg.fmri.PkgFmri(pkgname) - t.republish_start_pkg(fmri) - for pkgfile in pkgfiles: - for hunk in pkgfile: - t.download_add_progress(0, hunk) - t.upload_add_progress(hunk) - t.download_add_progress(1, 0) - time.sleep(pauseperfile) - t.republish_end_pkg(fmri) - t.republish_done() except KeyboardInterrupt: - t.flush() + t.flush() try: - t.reset_download() - t.archive_set_goal("testarchive", pkggoalfiles, pkggoalbytes) - n = 0 - for pkgname, pkgfiles in six.iteritems(dlscript): - for pkgfile in pkgfiles: - for hunk in pkgfile: - t.archive_add_progress(0, hunk) - t.archive_add_progress(1, 0) - time.sleep(pauseperfile) - t.archive_done() - 
except KeyboardInterrupt: - t.flush() - try: - t.set_major_phase(t.PHASE_EXECUTE) - - nactions = 100 - t.actions_set_goal(t.ACTION_REMOVE, nactions) - t.actions_set_goal(t.ACTION_INSTALL, nactions) - t.actions_set_goal(t.ACTION_UPDATE, nactions) - for act in [t.ACTION_REMOVE, t.ACTION_INSTALL, t.ACTION_UPDATE]: - for x in range(0, nactions): - t.actions_add_progress(act) - time.sleep(0.0015 * fast) - t.actions_done(act) - t.actions_all_done() + t.set_major_phase(t.PHASE_PLAN) + planids = sorted( + [ + v + for k, v in ProgressTrackerFrontend.__dict__.items() + if k.startswith("PLAN_") + ] + ) + t.plan_all_start() + for planid in planids: + r = random.randint(20, 100) + # we always try to set a goal; this will fail + # for ungoaled items, so then we try again + # without a goal. This saves us the complicated + # task of inspecting the tracker-- and + # multiprogress makes such inspection much + # harder. + try: + t.plan_start(planid, goal=r) + except RuntimeError: + t.plan_start(planid) + for x in range(0, r): + t.plan_add_progress(planid) + time.sleep(0.02 * fast) + t.plan_done(planid) + t.plan_all_done() except KeyboardInterrupt: - t.flush() + t.flush() try: - t.set_major_phase(t.PHASE_FINALIZE) - for jobname, job in ProgressTrackerFrontend.__dict__.items(): - if not jobname.startswith("JOB_"): - continue - r = random.randint(5, 30) - # we always try to set a goal; this will fail for - # ungoaled items, so then we try again without a goal. - # This saves us the complicated task of inspecting the - # tracker-- and multiprogress makes such inspection - # much harder. - try: - t.job_start(job, goal=r) - except RuntimeError: - t.job_start(job) - - for x in range(0, r): - t.job_add_progress(job) - time.sleep(0.02 * fast) - t.job_done(job) + t.manifest_fetch_start(len(dlscript)) + for pkgnm in dlscript: + t.manifest_fetch_progress(completion=False) + time.sleep(0.05 * fast) + t.manifest_fetch_progress(completion=True) + time.sleep(0.05 * fast) + for pkgnm in dlscript: + t.manifest_commit() + time.sleep(0.05 * fast) + t.manifest_fetch_done() except KeyboardInterrupt: - t.flush() + t.flush() + + perpkgfiles = 50 + pkggoalfiles = len(dlscript) * perpkgfiles + pkggoalbytes = 0 + filesizemax = 250000 + hunkmax = 8192 + approx_time = 5.0 * fast # how long we want the dl to take + # invent a list of random download chunks. 
+ for pkgname, filelist in six.iteritems(dlscript): + for f in range(0, perpkgfiles): + filesize = random.randint(0, filesizemax) + hunks = [] + while filesize > 0: + delta = min(filesize, random.randint(0, hunkmax)) + hunks.append(delta) + filesize -= delta + pkggoalbytes += delta + filelist.append(hunks) + + # pylint is picky about this message: + # old-division; pylint: disable=W1619 + pauseperfile = approx_time / pkggoalfiles + + try: + t.download_set_goal(len(dlscript), pkggoalfiles, pkggoalbytes) + n = 0 + for pkgname, pkgfiles in six.iteritems(dlscript): + fmri = pkg.fmri.PkgFmri(pkgname) + t.download_start_pkg(fmri) + for pkgfile in pkgfiles: + for hunk in pkgfile: + t.download_add_progress(0, hunk) + t.download_add_progress(1, 0) + time.sleep(pauseperfile) + t.download_end_pkg(fmri) + t.download_done() + except KeyboardInterrupt: + t.flush() + + try: + t.reset_download() + t.republish_set_goal(len(dlscript), pkggoalbytes, pkggoalbytes) + n = 0 + for pkgname, pkgfiles in six.iteritems(dlscript): + fmri = pkg.fmri.PkgFmri(pkgname) + t.republish_start_pkg(fmri) + for pkgfile in pkgfiles: + for hunk in pkgfile: + t.download_add_progress(0, hunk) + t.upload_add_progress(hunk) + t.download_add_progress(1, 0) + time.sleep(pauseperfile) + t.republish_end_pkg(fmri) + t.republish_done() + except KeyboardInterrupt: + t.flush() + + try: + t.reset_download() + t.archive_set_goal("testarchive", pkggoalfiles, pkggoalbytes) + n = 0 + for pkgname, pkgfiles in six.iteritems(dlscript): + for pkgfile in pkgfiles: + for hunk in pkgfile: + t.archive_add_progress(0, hunk) + t.archive_add_progress(1, 0) + time.sleep(pauseperfile) + t.archive_done() + except KeyboardInterrupt: + t.flush() + try: + t.set_major_phase(t.PHASE_EXECUTE) + + nactions = 100 + t.actions_set_goal(t.ACTION_REMOVE, nactions) + t.actions_set_goal(t.ACTION_INSTALL, nactions) + t.actions_set_goal(t.ACTION_UPDATE, nactions) + for act in [t.ACTION_REMOVE, t.ACTION_INSTALL, t.ACTION_UPDATE]: + for x in range(0, nactions): + t.actions_add_progress(act) + time.sleep(0.0015 * fast) + t.actions_done(act) + t.actions_all_done() + except KeyboardInterrupt: + t.flush() + + try: + t.set_major_phase(t.PHASE_FINALIZE) + for jobname, job in ProgressTrackerFrontend.__dict__.items(): + if not jobname.startswith("JOB_"): + continue + r = random.randint(5, 30) + # we always try to set a goal; this will fail for + # ungoaled items, so then we try again without a goal. + # This saves us the complicated task of inspecting the + # tracker-- and multiprogress makes such inspection + # much harder. + try: + t.job_start(job, goal=r) + except RuntimeError: + t.job_start(job) + + for x in range(0, r): + t.job_add_progress(job) + time.sleep(0.02 * fast) + t.job_done(job) + except KeyboardInterrupt: + t.flush() + + try: + # do some other things to drive up test coverage. + t.flush() + + # test lint + for phase in [t.LINT_PHASETYPE_SETUP, t.LINT_PHASETYPE_EXECUTE]: + t.lint_next_phase(2, phase) + for x in range(0, 100): + t.lint_add_progress() + time.sleep(0.02 * fast) + t.lint_done() + except KeyboardInterrupt: + t.flush() + return - try: - # do some other things to drive up test coverage. 
- t.flush() - - # test lint - for phase in [t.LINT_PHASETYPE_SETUP, t.LINT_PHASETYPE_EXECUTE]: - t.lint_next_phase(2, phase) - for x in range(0, 100): - t.lint_add_progress() - time.sleep(0.02 * fast) - t.lint_done() - except KeyboardInterrupt: - t.flush() - return # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/publisher.py b/src/modules/client/publisher.py index 857ba8288..81476fbc4 100644 --- a/src/modules/client/publisher.py +++ b/src/modules/client/publisher.py @@ -55,8 +55,13 @@ from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric import padding from io import BytesIO -from six.moves.urllib.parse import quote, urlsplit, urlparse, urlunparse, \ - ParseResult +from six.moves.urllib.parse import ( + quote, + urlsplit, + urlparse, + urlunparse, + ParseResult, +) from six.moves.urllib.request import url2pathname import pkg.catalog @@ -70,6 +75,7 @@ from pkg.client import global_settings from pkg.client.debugvalues import DebugValues + logger = global_settings.logger from pkg.misc import EmptyDict, EmptyI, SIGNATURE_POLICY, DictProperty @@ -92,7 +98,7 @@ # Supported Protocol Schemes SUPPORTED_SCHEMES = set(("file", "http", "https")) -SUPPORTED_PROXY_SCHEMES = ("http") +SUPPORTED_PROXY_SCHEMES = "http" # SSL Protocol Schemes SSL_SCHEMES = set(("https",)) @@ -111,9 +117,17 @@ # and vice versa. EXTENSIONS_VALUES = { x509.BasicConstraints: ["ca", "path_length"], - x509.KeyUsage: ["digital_signature", "content_commitment", - "key_encipherment", "data_encipherment", "key_agreement", "key_cert_sign", - "crl_sign", "encipher_only", "decipher_only"] + x509.KeyUsage: [ + "digital_signature", + "content_commitment", + "key_encipherment", + "data_encipherment", + "key_agreement", + "key_cert_sign", + "crl_sign", + "encipher_only", + "decipher_only", + ], } # Only listed extension values (properties) here can have a value True set in a @@ -121,7 +135,7 @@ # treated as unsupported. SUPPORTED_EXTENSION_VALUES = { x509.BasicConstraints: ("ca", "path_length"), - x509.KeyUsage: ("digital_signature", "key_cert_sign", "crl_sign") + x509.KeyUsage: ("digital_signature", "key_cert_sign", "crl_sign"), } # These dictionaries map uses into their extensions. @@ -145,981 +159,1139 @@ # system-repository. SYSREPO_PROXY = "" -class RepositoryURI(object): - """Class representing a repository URI and any transport-related - information.""" - - # These properties are declared here so that they show up in the pydoc - # documentation as private, and for clarity in the property declarations - # found near the end of the class definition. - __priority = None - __proxies = None - __ssl_cert = None - __ssl_key = None - __trailing_slash = None - __uri = None - __disabled = False - __system = False - - # Used to store the id of the original object this one was copied - # from during __copy__. - _source_object_id = None - - def __init__(self, uri, priority=None, ssl_cert=None, ssl_key=None, - trailing_slash=True, proxy=None, system=False, proxies=None, - disabled=False): - - - # Must set first. - self.__trailing_slash = trailing_slash - self.__scheme = None - self.__netloc = None - self.__proxies = [] - - # Note that the properties set here are intentionally lacking - # the '__' prefix which means assignment will occur using the - # get/set methods declared for the property near the end of - # the class definition. 
- self.priority = priority - self.uri = uri - self.ssl_cert = ssl_cert - self.ssl_key = ssl_key - self.disabled = disabled - # The proxy parameter is deprecated and remains for backwards - # compatibity, for now. If we get given both, then we must - # complain - this error is for internal use only. - if proxy and proxies: - raise api_errors.PublisherError("Both 'proxies' and " - "'proxy' values were used to create a " - "RepositoryURI object.") - - if proxy: - self.proxies = [ProxyURI(proxy)] - if proxies: - self.proxies = proxies - self.system = system - - def __copy__(self): - uri = RepositoryURI(self.__uri, priority=self.__priority, - ssl_cert=self.__ssl_cert, ssl_key=self.__ssl_key, - trailing_slash=self.__trailing_slash, - proxies=self.__proxies, system=self.system, - disabled=self.__disabled) - uri._source_object_id = id(self) - return uri - - def __eq__(self, other): - if isinstance(other, RepositoryURI): - return self.uri == other.uri - if isinstance(other, str): - return self.uri == other - return False - - def __ne__(self, other): - if isinstance(other, RepositoryURI): - return self.uri != other.uri - if isinstance(other, str): - return self.uri != other - return True - - __hash__ = object.__hash__ - - def __lt__(self, other): - if not other: - return False - if not isinstance(other, RepositoryURI): - other = RepositoryURI(other) - return self.uri < other.uri - - def __gt__(self, other): - if not other: - return True - if not isinstance(other, RepositoryURI): - other = RepositoryURI(other) - return self.uri > other.uri - - def __le__(self, other): - return self == other or self < other - - def __ge__(self, other): - return self == other or self > other - - def __set_disabled(self, disable): - if self.system: - raise api_errors.ModifyingSyspubException(_("Cannot " - "enable or disable origin(s) for a system " - "publisher")) - if not isinstance(disable, bool): - raise api_errors.BadRepositoryAttributeValue( - "disabled", value=disable) - if not disable: - self.__disabled = False - else: - self.__disabled = True - - def __set_system(self, system): - if not isinstance(system, bool): - raise api_errors.BadRepositoryAttributeValue( - "system", value=system) - if not system: - self.__system = False - else: - self.__system = True - - def __set_priority(self, value): - if value is not None: - try: - value = int(value) - except (TypeError, ValueError): - raise api_errors.BadRepositoryURIPriority(value) - self.__priority = value - - def __get_proxy(self): - if not self.__proxies: - return None - else: - return self.__proxies[0].uri - def __set_proxy(self, proxy): - if not proxy: - return - if not isinstance(proxy, ProxyURI): - p = ProxyURI(proxy) - else: - p = proxy +class RepositoryURI(object): + """Class representing a repository URI and any transport-related + information.""" + + # These properties are declared here so that they show up in the pydoc + # documentation as private, and for clarity in the property declarations + # found near the end of the class definition. + __priority = None + __proxies = None + __ssl_cert = None + __ssl_key = None + __trailing_slash = None + __uri = None + __disabled = False + __system = False + + # Used to store the id of the original object this one was copied + # from during __copy__. + _source_object_id = None + + def __init__( + self, + uri, + priority=None, + ssl_cert=None, + ssl_key=None, + trailing_slash=True, + proxy=None, + system=False, + proxies=None, + disabled=False, + ): + # Must set first. 
+ self.__trailing_slash = trailing_slash + self.__scheme = None + self.__netloc = None + self.__proxies = [] + + # Note that the properties set here are intentionally lacking + # the '__' prefix which means assignment will occur using the + # get/set methods declared for the property near the end of + # the class definition. + self.priority = priority + self.uri = uri + self.ssl_cert = ssl_cert + self.ssl_key = ssl_key + self.disabled = disabled + # The proxy parameter is deprecated and remains for backwards + # compatibity, for now. If we get given both, then we must + # complain - this error is for internal use only. + if proxy and proxies: + raise api_errors.PublisherError( + "Both 'proxies' and " + "'proxy' values were used to create a " + "RepositoryURI object." + ) - self.__proxies = [p] + if proxy: + self.proxies = [ProxyURI(proxy)] + if proxies: + self.proxies = proxies + self.system = system + + def __copy__(self): + uri = RepositoryURI( + self.__uri, + priority=self.__priority, + ssl_cert=self.__ssl_cert, + ssl_key=self.__ssl_key, + trailing_slash=self.__trailing_slash, + proxies=self.__proxies, + system=self.system, + disabled=self.__disabled, + ) + uri._source_object_id = id(self) + return uri + + def __eq__(self, other): + if isinstance(other, RepositoryURI): + return self.uri == other.uri + if isinstance(other, str): + return self.uri == other + return False + + def __ne__(self, other): + if isinstance(other, RepositoryURI): + return self.uri != other.uri + if isinstance(other, str): + return self.uri != other + return True + + __hash__ = object.__hash__ + + def __lt__(self, other): + if not other: + return False + if not isinstance(other, RepositoryURI): + other = RepositoryURI(other) + return self.uri < other.uri + + def __gt__(self, other): + if not other: + return True + if not isinstance(other, RepositoryURI): + other = RepositoryURI(other) + return self.uri > other.uri + + def __le__(self, other): + return self == other or self < other + + def __ge__(self, other): + return self == other or self > other + + def __set_disabled(self, disable): + if self.system: + raise api_errors.ModifyingSyspubException( + _( + "Cannot " + "enable or disable origin(s) for a system " + "publisher" + ) + ) + if not isinstance(disable, bool): + raise api_errors.BadRepositoryAttributeValue( + "disabled", value=disable + ) + if not disable: + self.__disabled = False + else: + self.__disabled = True + + def __set_system(self, system): + if not isinstance(system, bool): + raise api_errors.BadRepositoryAttributeValue("system", value=system) + if not system: + self.__system = False + else: + self.__system = True + + def __set_priority(self, value): + if value is not None: + try: + value = int(value) + except (TypeError, ValueError): + raise api_errors.BadRepositoryURIPriority(value) + self.__priority = value + + def __get_proxy(self): + if not self.__proxies: + return None + else: + return self.__proxies[0].uri + + def __set_proxy(self, proxy): + if not proxy: + return + if not isinstance(proxy, ProxyURI): + p = ProxyURI(proxy) + else: + p = proxy + + self.__proxies = [p] + + def __set_proxies(self, proxies): + for proxy in proxies: + if not isinstance(proxy, ProxyURI): + raise api_errors.BadRepositoryAttributeValue( + "proxies", value=proxy + ) + + if proxies and self.scheme == "file": + raise api_errors.UnsupportedRepositoryURIAttribute( + "proxies", scheme=self.scheme + ) - def __set_proxies(self, proxies): + if not (isinstance(proxies, list) or isinstance(proxies, tuple)): + raise 
api_errors.BadRepositoryAttributeValue( + "proxies", value=proxies + ) - for proxy in proxies: - if not isinstance(proxy, ProxyURI): - raise api_errors.BadRepositoryAttributeValue( - "proxies", value=proxy) + # for now, we only support a single proxy per RepositoryURI + if len(proxies) > 1: + raise api_errors.BadRepositoryAttributeValue( + "proxies", value=proxies + ) - if proxies and self.scheme == "file": - raise api_errors.UnsupportedRepositoryURIAttribute( - "proxies", scheme=self.scheme) + if proxies: + self.__proxies = proxies + else: + self.__proxies = [] - if not (isinstance(proxies, list) or - isinstance(proxies, tuple)): - raise api_errors.BadRepositoryAttributeValue( - "proxies", value=proxies) + def __set_ssl_cert(self, filename): + if self.scheme not in SSL_SCHEMES and filename: + raise api_errors.UnsupportedRepositoryURIAttribute( + "ssl_cert", scheme=self.scheme + ) + if filename: + if not isinstance(filename, six.string_types): + raise api_errors.BadRepositoryAttributeValue( + "ssl_cert", value=filename + ) + filename = os.path.normpath(filename) + if filename == "": + filename = None + self.__ssl_cert = filename + + def __set_ssl_key(self, filename): + if self.scheme not in SSL_SCHEMES and filename: + raise api_errors.UnsupportedRepositoryURIAttribute( + "ssl_key", scheme=self.scheme + ) + if filename: + if not isinstance(filename, six.string_types): + raise api_errors.BadRepositoryAttributeValue( + "ssl_key", value=filename + ) + filename = os.path.normpath(filename) + if filename == "": + filename = None + self.__ssl_key = filename + + def __set_trailing_slash(self, value): + if value not in (True, False): + raise api_errors.BadRepositoryAttributeValue( + "trailing_slash", value=value + ) + self.__trailing_slash = value + + def __set_uri(self, uri): + if uri is None: + raise api_errors.BadRepositoryURI(uri) + + # if we're setting the URI to an existing value, do nothing. + if uri == self.__uri: + return + + # This is not ideal, but determining whether we're operating + # on a ProxyURI saves us duplicating code in that class, + # which we would otherwise need, due to __protected members + # here. + if isinstance(self, ProxyURI): + is_proxy = True + else: + is_proxy = False + + # Decompose URI to verify attributes. + scheme, netloc, path, params, query = urlsplit(uri, allow_fragments=0) + + self.__scheme = scheme.lower() + self.__netloc = netloc + + # The set of currently supported protocol schemes. + if is_proxy and self.__scheme not in SUPPORTED_PROXY_SCHEMES: + raise api_errors.UnsupportedProxyURI(uri) + else: + if self.__scheme not in SUPPORTED_SCHEMES: + raise api_errors.UnsupportedRepositoryURI(uri) + + # XXX valid_pub_url's check isn't quite right and could prevent + # usage of IDNs (international domain names). + if ( + self.__scheme.startswith("http") and not netloc + ) or not misc.valid_pub_url(uri, proxy=is_proxy): + raise api_errors.BadRepositoryURI(uri) + + if self.__scheme == "file" and netloc: + raise api_errors.BadRepositoryURI(uri) + + # Normalize URI scheme. 
+ uri = uri.replace(scheme, self.__scheme, 1) + + if self.__trailing_slash: + uri = uri.rstrip("/") + uri = misc.url_affix_trailing_slash(uri) + + if self.__scheme not in SSL_SCHEMES: + self.__ssl_cert = None + self.__ssl_key = None + + self.__uri = uri + + def _override_uri(self, uri): + """Allow the __uri field of the object to be overridden in + special cases.""" + if uri not in [None, SYSREPO_PROXY]: + raise api_errors.BadRepositoryURI(uri) + self.__uri = uri + + def __str__(self): + return str(self.__uri) + + def change_scheme(self, new_scheme): + """Change the scheme of this uri.""" + + assert self.__uri + scheme, netloc, path, params, query, fragment = urlparse( + self.__uri, allow_fragments=False + ) + if new_scheme == scheme: + return + self.uri = urlunparse( + (new_scheme, netloc, path, params, query, fragment) + ) + + def get_host(self): + """Get the host and port of this URI if it's a http uri.""" + + scheme, netloc, path, params, query, fragment = urlparse( + self.__uri, allow_fragments=0 + ) + if scheme != "file": + return netloc + return "" + + def get_pathname(self): + """Returns the URI path as a pathname if the URI is a file + URI or '' otherwise.""" + + scheme, netloc, path, params, query, fragment = urlparse( + self.__uri, allow_fragments=0 + ) + if scheme == "file": + return url2pathname(path) + return "" + + disabled = property( + lambda self: self.__disabled, + __set_disabled, + None, + "A boolean value indicating whether this repository URI should be " + "used for packaging operations.", + ) + + ssl_cert = property( + lambda self: self.__ssl_cert, + __set_ssl_cert, + None, + "The absolute pathname of a PEM-encoded SSL certificate file.", + ) + + ssl_key = property( + lambda self: self.__ssl_key, + __set_ssl_key, + None, + "The absolute pathname of a PEM-encoded SSL key file.", + ) + + uri = property( + lambda self: self.__uri, + __set_uri, + None, + "The URI used to access a repository.", + ) + + priority = property( + lambda self: self.__priority, + __set_priority, + None, + "An integer value representing the importance of this repository " + "URI relative to others.", + ) + + proxy = property( + __get_proxy, + __set_proxy, + None, + "The proxy to use to " "access this repository.", + ) + + proxies = property( + lambda self: self.__proxies, + __set_proxies, + None, + "A list of proxies that can be used to access this repository." 
+ "At runtime, a $http_proxy environment variable might override this.", + ) + + system = property( + lambda self: self.__system, + __set_system, + None, + "A boolean value indicating whether this repository URI is for" + "a system repository.", + ) + + @property + def scheme(self): + """The URI scheme.""" + if not self.__uri: + return "" + return urlsplit(self.__uri, allow_fragments=0)[0] + + trailing_slash = property( + lambda self: self.__trailing_slash, + __set_trailing_slash, + None, + "A boolean value indicating whether any URI provided for this " + "object should have a trailing slash appended when setting the " + "URI property.", + ) - # for now, we only support a single proxy per RepositoryURI - if len(proxies) > 1: - raise api_errors.BadRepositoryAttributeValue( - "proxies", value=proxies) - if proxies: - self.__proxies = proxies - else: - self.__proxies = [] - - def __set_ssl_cert(self, filename): - if self.scheme not in SSL_SCHEMES and filename: - raise api_errors.UnsupportedRepositoryURIAttribute( - "ssl_cert", scheme=self.scheme) - if filename: - if not isinstance(filename, six.string_types): - raise api_errors.BadRepositoryAttributeValue( - "ssl_cert", value=filename) - filename = os.path.normpath(filename) - if filename == "": - filename = None - self.__ssl_cert = filename - - def __set_ssl_key(self, filename): - if self.scheme not in SSL_SCHEMES and filename: - raise api_errors.UnsupportedRepositoryURIAttribute( - "ssl_key", scheme=self.scheme) - if filename: - if not isinstance(filename, six.string_types): - raise api_errors.BadRepositoryAttributeValue( - "ssl_key", value=filename) - filename = os.path.normpath(filename) - if filename == "": - filename = None - self.__ssl_key = filename - - def __set_trailing_slash(self, value): - if value not in (True, False): - raise api_errors.BadRepositoryAttributeValue( - "trailing_slash", value=value) - self.__trailing_slash = value - - def __set_uri(self, uri): - if uri is None: - raise api_errors.BadRepositoryURI(uri) - - # if we're setting the URI to an existing value, do nothing. - if uri == self.__uri: - return - - # This is not ideal, but determining whether we're operating - # on a ProxyURI saves us duplicating code in that class, - # which we would otherwise need, due to __protected members - # here. - if isinstance(self, ProxyURI): - is_proxy = True - else: - is_proxy = False - - # Decompose URI to verify attributes. - scheme, netloc, path, params, query = \ - urlsplit(uri, allow_fragments=0) +class ProxyURI(RepositoryURI): + """A class to represent the URI of a proxy. The 'uri' value can be + 'None' if 'system' is set to True.""" + + def __init__(self, uri, system=False): + self.__system = None + self.system = system + if not system: + self.uri = uri + + def __set_system(self, value): + """A property to specify whether we should use the system + publisher as the proxy. Note that this method modifies the + 'uri' property when set or cleared.""" + if value not in (True, False): + raise api_errors.BadRepositoryAttributeValue("system", value=value) + self.__system = value + if value: + # Set a special value for the uri, intentionally an + # invalid URI which should get caught by any consumers + # using it by mistake. This also allows us to reuse + # the __eq__, __cmp__, etc. 
methods from the parent + # (where there is no public way of setting the URI to + # SYSREPO_PROXY, '') + self._override_uri(SYSREPO_PROXY) + else: + self._override_uri(None) + + def __unsupported(self, value): + """A method used to prevent certain properties defined in the + parent class from being set on ProxyURI objects.""" + + # We don't expect this string to be exposed to users. + raise ValueError( + "This property cannot be set to {0} on a " + "ProxyURI object.".format(value) + ) + + system = property( + lambda self: self.__system, + __set_system, + None, + "True, if we should use the system publisher as a proxy.", + ) + + # Ensure we can't set any of the following properties. + proxies = property( + lambda self: None, + __unsupported, + None, + "proxies is an invalid property for ProxyURI properties", + ) + + ssl_cert = property( + lambda self: None, + __unsupported, + None, + "ssl_cert is an invalid property for ProxyURI properties", + ) + + ssl_key = property( + lambda self: None, + __unsupported, + None, + "ssl_key is an invalid property for ProxyURI properties", + ) + + priority = property( + lambda self: None, + __unsupported, + None, + "priority is an invalid property for ProxyURI properties", + ) + + trailing_slash = property( + lambda self: None, + __unsupported, + None, + "trailing_slash is an invalid property for ProxyURI properties", + ) - self.__scheme = scheme.lower() - self.__netloc = netloc - # The set of currently supported protocol schemes. - if is_proxy and self.__scheme not in \ - SUPPORTED_PROXY_SCHEMES: - raise api_errors.UnsupportedProxyURI(uri) - else: - if self.__scheme not in SUPPORTED_SCHEMES: - raise api_errors.UnsupportedRepositoryURI(uri) - - # XXX valid_pub_url's check isn't quite right and could prevent - # usage of IDNs (international domain names). - if (self.__scheme.startswith("http") and not netloc) or \ - not misc.valid_pub_url(uri, proxy=is_proxy): - raise api_errors.BadRepositoryURI(uri) - - if self.__scheme == "file" and netloc: - raise api_errors.BadRepositoryURI(uri) - - # Normalize URI scheme. 
- uri = uri.replace(scheme, self.__scheme, 1) - - if self.__trailing_slash: - uri = uri.rstrip("/") - uri = misc.url_affix_trailing_slash(uri) - - if self.__scheme not in SSL_SCHEMES: - self.__ssl_cert = None - self.__ssl_key = None - - self.__uri = uri - - def _override_uri(self, uri): - """Allow the __uri field of the object to be overridden in - special cases.""" - if uri not in [None, SYSREPO_PROXY]: - raise api_errors.BadRepositoryURI(uri) - self.__uri = uri - - def __str__(self): - return str(self.__uri) - - def change_scheme(self, new_scheme): - """Change the scheme of this uri.""" - - assert self.__uri - scheme, netloc, path, params, query, fragment = \ - urlparse(self.__uri, allow_fragments=False) - if new_scheme == scheme: - return - self.uri = urlunparse( - (new_scheme, netloc, path, params, query, fragment)) - - def get_host(self): - """Get the host and port of this URI if it's a http uri.""" - - scheme, netloc, path, params, query, fragment = \ - urlparse(self.__uri, allow_fragments=0) - if scheme != "file": - return netloc - return "" - - def get_pathname(self): - """Returns the URI path as a pathname if the URI is a file - URI or '' otherwise.""" - - scheme, netloc, path, params, query, fragment = \ - urlparse(self.__uri, allow_fragments=0) - if scheme == "file": - return url2pathname(path) - return "" - - disabled = property(lambda self: self.__disabled, __set_disabled, None, - "A boolean value indicating whether this repository URI should be " - "used for packaging operations.") - - ssl_cert = property(lambda self: self.__ssl_cert, __set_ssl_cert, None, - "The absolute pathname of a PEM-encoded SSL certificate file.") - - ssl_key = property(lambda self: self.__ssl_key, __set_ssl_key, None, - "The absolute pathname of a PEM-encoded SSL key file.") - - uri = property(lambda self: self.__uri, __set_uri, None, - "The URI used to access a repository.") - - priority = property(lambda self: self.__priority, __set_priority, None, - "An integer value representing the importance of this repository " - "URI relative to others.") - - proxy = property(__get_proxy, __set_proxy, None, "The proxy to use to " - "access this repository.") - - proxies = property(lambda self: self.__proxies, __set_proxies, None, - "A list of proxies that can be used to access this repository." - "At runtime, a $http_proxy environment variable might override this." +class TransportRepoURI(RepositoryURI): + """A TransportRepoURI allows for multiple representations of a given + RepositoryURI, each with different properties. + + One RepositoryURI could be represented by several TransportRepoURIs, + used to allow the transport to properly track repo statistics for + for each discrete path to a given URI, perhaps using different proxies + or trying one of several SSL key/cert pairs.""" + + def __init__( + self, + uri, + priority=None, + ssl_cert=None, + ssl_key=None, + trailing_slash=True, + proxy=None, + system=False, + ): + # Must set first. 
+ self.__proxy = None + self.__runtime_proxy = None + self.proxy = proxy + + RepositoryURI.__init__( + self, + uri, + priority=priority, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + trailing_slash=trailing_slash, + system=system, + ) + + def __eq__(self, other): + if isinstance(other, TransportRepoURI): + return self.uri == other.uri and self.proxy == other.proxy + if isinstance(other, six.string_types): + return self.uri == other and self.proxy == None + return False + + def __ne__(self, other): + if isinstance(other, TransportRepoURI): + return self.uri != other.uri or self.proxy != other.proxy + if isinstance(other, six.string_types): + return self.uri != other or self.proxy != None + return True + + __hash__ = object.__hash__ + + def __lt__(self, other): + if not other: + return False + if isinstance(other, six.string_types): + other = TransportRepoURI(other) + elif not isinstance(other, TransportRepoURI): + return False + if self.uri < other.uri: + return True + if self.uri != other.uri: + return False + return self.proxy < other.proxy + + def __gt__(self, other): + if not other: + return True + if isinstance(other, six.string_types): + other = TransportRepoURI(other) + elif not isinstance(other, TransportRepoURI): + return True + if self.uri > other.uri: + return True + if self.uri != other.uri: + return False + return self.proxy > other.proxy + + def __le__(self, other): + return self == other or self < other + + def __ge__(self, other): + return self == other or self > other + + def key(self): + """Returns a value that can be used to identify this RepoURI + uniquely for the transport system. Normally, this would be done + using __hash__() however, TransportRepoURI objects are not + guaranteed to be immutable. + + The key is a (uri, proxy) tuple, where the proxy is + the proxy used to reach that URI. Note that in the transport + system, we may choose to override the proxy value here. + + If this key format changes, a corresponding change should be + made in pkg.client.transport.engine.__cleanup_requests(..)""" + + u = self.uri + p = self.__proxy + + if self.uri: + u = self.uri.rstrip("/") + return (u, p) + + def __set_proxy(self, proxy): + assert not self.ssl_cert + assert not self.ssl_key + + if proxy and self.scheme == "file": + raise api_errors.UnsupportedRepositoryURIAttribute( + "proxy", scheme=self.scheme ) + if proxy: + self.__proxy = proxy.rstrip("/") + else: + self.__proxy = None + # Changing the proxy value causes us to clear any cached + # value we have in __runtime_proxy. + self.__runtime_proxy = None + + def __get_runtime_proxy(self): + """Returns the proxy that should be used at runtime, which may + differ from the persisted proxy value. We check for http_proxy, + https_proxy and all_proxy OS environment variables. + + To avoid repeated environment lookups, we cache the results.""" + + # we don't permit the proxy used by system publishers to be + # overridden by environment variables. + if self.system: + return self.proxy + + if not self.__runtime_proxy: + self.__runtime_proxy = misc.get_runtime_proxy( + self.__proxy, self.uri + ) + return self.__runtime_proxy + + def __set_runtime_proxy(self, runtime_proxy): + """The runtime proxy value is always computed dynamically, + we should not allow a caller to set it.""" + + assert False, "Refusing to set a runtime_proxy value." + + @staticmethod + def fromrepouri(repouri): + """Build a list of TransportRepositoryURI objects using + properties from the given RepositoryURI, 'repouri'. 
+ + This is to allow the transport to try different paths to + a given RepositoryURI, if more than one is possible.""" + + trans_repouris = [] + # we just use the proxies for now, but in future, we may want + # other per-origin/mirror properties + if repouri.proxies: + for p in repouri.proxies: + t = TransportRepoURI( + repouri.uri, + priority=repouri.priority, + ssl_cert=repouri.ssl_cert, + ssl_key=repouri.ssl_key, + system=repouri.system, + trailing_slash=repouri.trailing_slash, + proxy=p.uri, + ) + trans_repouris.append(t) + else: + trans_repouris.append( + TransportRepoURI( + repouri.uri, + priority=repouri.priority, + ssl_cert=repouri.ssl_cert, + ssl_key=repouri.ssl_key, + system=repouri.system, + trailing_slash=repouri.trailing_slash, + ) + ) + return trans_repouris - system = property(lambda self: self.__system, __set_system, None, - "A boolean value indicating whether this repository URI is for" - "a system repository.") - - @property - def scheme(self): - """The URI scheme.""" - if not self.__uri: - return "" - return urlsplit(self.__uri, allow_fragments=0)[0] - - trailing_slash = property(lambda self: self.__trailing_slash, - __set_trailing_slash, None, - "A boolean value indicating whether any URI provided for this " - "object should have a trailing slash appended when setting the " - "URI property.") + proxy = property( + lambda self: self.__proxy, + __set_proxy, + None, + "The proxy that is used to access this repository." + "At runtime, a $http_proxy environnent variable might override this.", + ) + runtime_proxy = property( + __get_runtime_proxy, + __set_runtime_proxy, + None, + "The proxy to use to access this repository. This value checks" + "OS environment variables, and expands any $user:$password values.", + ) -class ProxyURI(RepositoryURI): - """A class to represent the URI of a proxy. The 'uri' value can be - 'None' if 'system' is set to True.""" - - def __init__(self, uri, system=False): - self.__system = None - self.system = system - if not system: - self.uri = uri - - def __set_system(self, value): - """A property to specify whether we should use the system - publisher as the proxy. Note that this method modifies the - 'uri' property when set or cleared.""" - if value not in (True, False): - raise api_errors.BadRepositoryAttributeValue( - "system", value=value) - self.__system = value - if value: - # Set a special value for the uri, intentionally an - # invalid URI which should get caught by any consumers - # using it by mistake. This also allows us to reuse - # the __eq__, __cmp__, etc. methods from the parent - # (where there is no public way of setting the URI to - # SYSREPO_PROXY, '') - self._override_uri(SYSREPO_PROXY) - else: - self._override_uri(None) - def __unsupported(self, value): - """A method used to prevent certain properties defined in the - parent class from being set on ProxyURI objects.""" +class Repository(object): + """Class representing a repository object. + + A repository object represents a location where clients can publish + and retrieve package content and/or metadata. It has the following + characteristics: + + - may have one or more origins (URIs) for publication and + retrieval of package metadata and content. + + - may have zero or more mirrors (URIs) for retrieval of package + content.""" + + # These properties are declared here so that they show up in the pydoc + # documentation as private, and for clarity in the property declarations + # found near the end of the class definition. 
+ __collection_type = None + __legal_uris = [] + __mirrors = [] + __origins = [] + __refresh_seconds = None + __registration_uri = None + __related_uris = [] + __sort_policy = URI_SORT_PRIORITY + + # Used to store the id of the original object this one was copied + # from during __copy__. + _source_object_id = None + + name = None + description = None + registered = False + + def __init__( + self, + collection_type=REPO_CTYPE_CORE, + description=None, + legal_uris=None, + mirrors=None, + name=None, + origins=None, + refresh_seconds=None, + registered=False, + registration_uri=None, + related_uris=None, + sort_policy=URI_SORT_PRIORITY, + ): + """Initializes a repository object. + + 'collection_type' is an optional constant value indicating the + type of packages in the repository. + + 'description' is an optional string value containing a + descriptive paragraph for the repository. + + 'legal_uris' should be a list of RepositoryURI objects or URI + strings indicating where licensing, legal, and terms of service + information for the repository can be found. + + 'mirrors' is an optional list of RepositoryURI objects or URI + strings indicating where package content can be retrieved. + + 'name' is an optional, short, descriptive name for the + repository. + + 'origins' should be a list of RepositoryURI objects or URI + strings indicating where package metadata can be retrieved. + + 'refresh_seconds' is an optional integer value indicating the + number of seconds clients should wait before refreshing cached + repository catalog or repository metadata information. + + 'registered' is an optional boolean value indicating whether + a client has registered with the repository's publisher. + + 'registration_uri' is an optional RepositoryURI object or a URI + string indicating a location clients can use to register or + obtain credentials needed to access the repository. + + 'related_uris' is an optional list of RepositoryURI objects or a + list of URI strings indicating the location of related + repositories that a client may be interested in. + + 'sort_policy' is an optional constant value indicating how + legal_uris, mirrors, origins, and related_uris should be + sorted.""" + + # Note that the properties set here are intentionally lacking + # the '__' prefix which means assignment will occur using the + # get/set methods declared for the property near the end of + # the class definition. + + # Must be set first so that it will apply to attributes set + # afterwards. + self.sort_policy = sort_policy + + self.collection_type = collection_type + self.description = description + self.legal_uris = legal_uris + self.mirrors = mirrors + self.name = name + self.origins = origins + self.refresh_seconds = refresh_seconds + self.registered = registered + self.registration_uri = registration_uri + self.related_uris = related_uris + + def __add_uri( + self, + attr, + uri, + dup_check=None, + priority=None, + ssl_cert=None, + ssl_key=None, + trailing_slash=True, + ): + if not isinstance(uri, RepositoryURI): + uri = RepositoryURI( + uri, + priority=priority, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + trailing_slash=trailing_slash, + ) - # We don't expect this string to be exposed to users. 
- raise ValueError("This property cannot be set to {0} on a " - "ProxyURI object.".format(value)) + if dup_check: + dup_check(uri) + + ulist = getattr(self, attr) + ulist.append(uri) + ulist.sort(key=URI_SORT_POLICIES[self.__sort_policy]) + + def __copy__(self): + cluris = [copy.copy(u) for u in self.legal_uris] + cmirrors = [copy.copy(u) for u in self.mirrors] + cruris = [copy.copy(u) for u in self.related_uris] + corigins = [copy.copy(u) for u in self.origins] + + repo = Repository( + collection_type=self.collection_type, + description=self.description, + legal_uris=cluris, + mirrors=cmirrors, + name=self.name, + origins=corigins, + refresh_seconds=self.refresh_seconds, + registered=self.registered, + registration_uri=copy.copy(self.registration_uri), + related_uris=cruris, + ) + repo._source_object_id = id(self) + return repo + + def __replace_uris(self, attr, value, trailing_slash=True): + if value is None: + value = [] + if not isinstance(value, list): + raise api_errors.BadRepositoryAttributeValue(attr, value=value) + uris = [] + for u in value: + if not isinstance(u, RepositoryURI): + u = RepositoryURI(u, trailing_slash=trailing_slash) + elif trailing_slash: + u.uri = misc.url_affix_trailing_slash(u.uri) + uris.append(u) + uris.sort(key=URI_SORT_POLICIES[self.__sort_policy]) + return uris + + def __set_collection_type(self, value): + if value not in REPO_COLLECTION_TYPES: + raise api_errors.BadRepositoryCollectionType(value) + self.__collection_type = value + + def __set_legal_uris(self, value): + self.__legal_uris = self.__replace_uris( + "legal_uris", value, trailing_slash=False + ) + + def __set_mirrors(self, value): + self.__mirrors = self.__replace_uris("mirrors", value) + + def __set_origins(self, value): + self.__origins = self.__replace_uris("origins", value) + + def __set_registration_uri(self, value): + if value and not isinstance(value, RepositoryURI): + value = RepositoryURI(value, trailing_slash=False) + self.__registration_uri = value + + def __set_related_uris(self, value): + self.__related_uris = self.__replace_uris( + "related_uris", value, trailing_slash=False + ) + + def __set_refresh_seconds(self, value): + if value is not None: + try: + value = int(value) + except (TypeError, ValueError): + raise api_errors.BadRepositoryAttributeValue( + "refresh_seconds", value=value + ) + if value < 0: + raise api_errors.BadRepositoryAttributeValue( + "refresh_seconds", value=value + ) + self.__refresh_seconds = value + + def __set_sort_policy(self, value): + if value not in URI_SORT_POLICIES: + raise api_errors.BadRepositoryURISortPolicy(value) + self.__sort_policy = value + + def add_legal_uri(self, uri, priority=None, ssl_cert=None, ssl_key=None): + """Adds the specified legal URI to the repository. + + 'uri' can be a RepositoryURI object or a URI string. If + it is a RepositoryURI object, all other parameters will be + ignored.""" + + self.__add_uri( + "legal_uris", + uri, + priority=priority, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + trailing_slash=False, + ) + + def add_mirror(self, mirror, priority=None, ssl_cert=None, ssl_key=None): + """Adds the specified mirror to the repository. + + 'mirror' can be a RepositoryURI object or a URI string. 
If + it is a RepositoryURI object, all other parameters will be + ignored.""" + + def dup_check(mirror): + if self.has_mirror(mirror): + o = self.get_mirror(mirror) + if o.system: + raise api_errors.DuplicateSyspubMirror(mirror) + raise api_errors.DuplicateRepositoryMirror(mirror) + + self.__add_uri( + "mirrors", + mirror, + dup_check=dup_check, + priority=priority, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + ) + + def add_origin(self, origin, priority=None, ssl_cert=None, ssl_key=None): + """Adds the specified origin to the repository. + + 'origin' can be a RepositoryURI object or a URI string. If + it is a RepositoryURI object, all other parameters will be + ignored.""" + + def dup_check(origin): + if self.has_origin(origin): + o = self.get_origin(origin) + if o.system: + raise api_errors.DuplicateSyspubOrigin(origin) + raise api_errors.DuplicateRepositoryOrigin(origin) + + self.__add_uri( + "origins", + origin, + dup_check=dup_check, + priority=priority, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + ) + + def add_related_uri(self, uri, priority=None, ssl_cert=None, ssl_key=None): + """Adds the specified related URI to the repository. + + 'uri' can be a RepositoryURI object or a URI string. If + it is a RepositoryURI object, all other parameters will be + ignored.""" + + self.__add_uri( + "related_uris", + uri, + priority=priority, + ssl_cert=ssl_cert, + ssl_key=ssl_key, + trailing_slash=False, + ) + + def get_mirror(self, mirror): + """Returns a RepositoryURI object representing the mirror + that matches 'mirror'. + + 'mirror' can be a RepositoryURI object or a URI string.""" + + if not isinstance(mirror, RepositoryURI): + mirror = misc.url_affix_trailing_slash(mirror) + for m in self.mirrors: + if mirror == m.uri: + return m + raise api_errors.UnknownRepositoryMirror(mirror) + + def get_origin(self, origin): + """Returns a RepositoryURI object representing the origin + that matches 'origin'. + + 'origin' can be a RepositoryURI object or a URI string.""" + + if not isinstance(origin, RepositoryURI): + origin = misc.url_affix_trailing_slash(origin) + for o in self.origins: + if origin == o.uri: + return o + raise api_errors.UnknownRepositoryOrigin(origin) + + def has_mirror(self, mirror): + """Returns a boolean value indicating whether a matching + 'mirror' exists for the repository. + + 'mirror' can be a RepositoryURI object or a URI string.""" + + if not isinstance(mirror, RepositoryURI): + mirror = RepositoryURI(mirror) + return mirror in self.mirrors + + def has_origin(self, origin): + """Returns a boolean value indicating whether a matching + 'origin' exists for the repository. + + 'origin' can be a RepositoryURI object or a URI string.""" + + if not isinstance(origin, RepositoryURI): + origin = RepositoryURI(origin) + return origin in self.origins + + def remove_legal_uri(self, uri): + """Removes the legal URI matching 'uri' from the repository. + + 'uri' can be a RepositoryURI object or a URI string.""" + + for i, m in enumerate(self.legal_uris): + if uri == m.uri: + # Immediate return as the index into the array + # changes with each removal. + del self.legal_uris[i] + return + raise api_errors.UnknownLegalURI(uri) + + def remove_mirror(self, mirror): + """Removes the mirror matching 'mirror' from the repository. 
+
+ 'mirror' can be a RepositoryURI object or a URI string."""
+
+ if not isinstance(mirror, RepositoryURI):
+ mirror = misc.url_affix_trailing_slash(mirror)
+ for i, m in enumerate(self.mirrors):
+ if mirror == m.uri:
+ if m.system:
+ raise api_errors.RemoveSyspubMirror(mirror.uri)
+ # Immediate return as the index into the array
+ # changes with each removal.
+ del self.mirrors[i]
+ return
+ raise api_errors.UnknownRepositoryMirror(mirror)
+
+ def remove_origin(self, origin):
+ """Removes the origin matching 'origin' from the repository.
+
+ 'origin' can be a RepositoryURI object or a URI string."""
+
+ if not isinstance(origin, RepositoryURI):
+ origin = RepositoryURI(origin)
+ for i, o in enumerate(self.origins):
+ if origin == o.uri:
+ if o.system:
+ raise api_errors.RemoveSyspubOrigin(origin.uri)
+ # Immediate return as the index into the array
+ # changes with each removal.
+ del self.origins[i]
+ return
+ raise api_errors.UnknownRepositoryOrigin(origin)
+
+ def remove_related_uri(self, uri):
+ """Removes the related URI matching 'uri' from the repository.
+
+ 'uri' can be a RepositoryURI object or a URI string."""
+
+ for i, m in enumerate(self.related_uris):
+ if uri == m.uri:
+ # Immediate return as the index into the array
+ # changes with each removal.
+ del self.related_uris[i]
+ return
+ raise api_errors.UnknownRelatedURI(uri)
+
+ def update_mirror(self, mirror, priority=None, ssl_cert=None, ssl_key=None):
+ """Updates an existing mirror object matching 'mirror'.
+
+ 'mirror' can be a RepositoryURI object or a URI string.
+
+ This method is deprecated, and may be removed in future API
+ versions."""
+
+ if not isinstance(mirror, RepositoryURI):
+ mirror = RepositoryURI(
+ mirror, priority=priority, ssl_cert=ssl_cert, ssl_key=ssl_key
+ )
-
- system = property(lambda self: self.__system, __set_system, None,
- "True, if we should use the system publisher as a proxy.")
+ target = self.get_mirror(mirror)
+ target.priority = mirror.priority
+ target.ssl_cert = mirror.ssl_cert
+ target.ssl_key = mirror.ssl_key
+ target.proxies = mirror.proxies
+ self.mirrors.sort(key=URI_SORT_POLICIES[self.__sort_policy])
-
- # Ensure we can't set any of the following properties.
- proxies = property(lambda self: None, __unsupported, None,
- "proxies is an invalid property for ProxyURI properties")
+ def update_origin(self, origin, priority=None, ssl_cert=None, ssl_key=None):
+ """Updates an existing origin object matching 'origin'.
-
- ssl_cert = property(lambda self: None, __unsupported, None,
- "ssl_cert is an invalid property for ProxyURI properties")
+ 'origin' can be a RepositoryURI object or a URI string.
- ssl_key = property(lambda self: None, __unsupported, None, - "ssl_key is an invalid property for ProxyURI properties") + This method is deprecated, and may be removed in future API + versions.""" - priority = property(lambda self: None, __unsupported, None, - "priority is an invalid property for ProxyURI properties") + if not isinstance(origin, RepositoryURI): + origin = RepositoryURI( + origin, priority=priority, ssl_cert=ssl_cert, ssl_key=ssl_key + ) - trailing_slash = property(lambda self: None, __unsupported, None, - "trailing_slash is an invalid property for ProxyURI properties") + target = self.get_origin(origin) + target.priority = origin.priority + target.ssl_cert = origin.ssl_cert + target.ssl_key = origin.ssl_key + target.proxies = origin.proxies + self.origins.sort(key=URI_SORT_POLICIES[self.__sort_policy]) + def reset_mirrors(self): + """Discards the current list of repository mirrors.""" -class TransportRepoURI(RepositoryURI): - """A TransportRepoURI allows for multiple representations of a given - RepositoryURI, each with different properties. - - One RepositoryURI could be represented by several TransportRepoURIs, - used to allow the transport to properly track repo statistics for - for each discrete path to a given URI, perhaps using different proxies - or trying one of several SSL key/cert pairs.""" - - def __init__(self, uri, priority=None, ssl_cert=None, ssl_key=None, - trailing_slash=True, proxy=None, system=False): - # Must set first. - self.__proxy = None - self.__runtime_proxy = None - self.proxy = proxy - - RepositoryURI.__init__(self, uri, priority=priority, - ssl_cert=ssl_cert, ssl_key=ssl_key, - trailing_slash=trailing_slash, system=system) - - def __eq__(self, other): - if isinstance(other, TransportRepoURI): - return self.uri == other.uri and \ - self.proxy == other.proxy - if isinstance(other, six.string_types): - return self.uri == other and self.proxy == None - return False - - def __ne__(self, other): - if isinstance(other, TransportRepoURI): - return self.uri != other.uri or \ - self.proxy != other.proxy - if isinstance(other, six.string_types): - return self.uri != other or self.proxy != None - return True - - __hash__ = object.__hash__ - - def __lt__(self, other): - if not other: - return False - if isinstance(other, six.string_types): - other = TransportRepoURI(other) - elif not isinstance(other, TransportRepoURI): - return False - if self.uri < other.uri: - return True - if self.uri != other.uri: - return False - return self.proxy < other.proxy - - def __gt__(self, other): - if not other: - return True - if isinstance(other, six.string_types): - other = TransportRepoURI(other) - elif not isinstance(other, TransportRepoURI): - return True - if self.uri > other.uri: - return True - if self.uri != other.uri: - return False - return self.proxy > other.proxy - - def __le__(self, other): - return self == other or self < other - - def __ge__(self, other): - return self == other or self > other - - def key(self): - """Returns a value that can be used to identify this RepoURI - uniquely for the transport system. Normally, this would be done - using __hash__() however, TransportRepoURI objects are not - guaranteed to be immutable. - - The key is a (uri, proxy) tuple, where the proxy is - the proxy used to reach that URI. Note that in the transport - system, we may choose to override the proxy value here. 
- - If this key format changes, a corresponding change should be - made in pkg.client.transport.engine.__cleanup_requests(..)""" - - u = self.uri - p = self.__proxy - - if self.uri: - u = self.uri.rstrip("/") - return (u, p) - - def __set_proxy(self, proxy): - assert not self.ssl_cert - assert not self.ssl_key - - if proxy and self.scheme == "file": - raise api_errors.UnsupportedRepositoryURIAttribute( - "proxy", scheme=self.scheme) - if proxy: - self.__proxy = proxy.rstrip("/") - else: - self.__proxy = None - # Changing the proxy value causes us to clear any cached - # value we have in __runtime_proxy. - self.__runtime_proxy = None - - def __get_runtime_proxy(self): - """Returns the proxy that should be used at runtime, which may - differ from the persisted proxy value. We check for http_proxy, - https_proxy and all_proxy OS environment variables. - - To avoid repeated environment lookups, we cache the results.""" - - # we don't permit the proxy used by system publishers to be - # overridden by environment variables. - if self.system: - return self.proxy - - if not self.__runtime_proxy: - self.__runtime_proxy = misc.get_runtime_proxy( - self.__proxy, self.uri) - return self.__runtime_proxy - - def __set_runtime_proxy(self, runtime_proxy): - """The runtime proxy value is always computed dynamically, - we should not allow a caller to set it.""" - - assert False, "Refusing to set a runtime_proxy value." - - @staticmethod - def fromrepouri(repouri): - """Build a list of TransportRepositoryURI objects using - properties from the given RepositoryURI, 'repouri'. - - This is to allow the transport to try different paths to - a given RepositoryURI, if more than one is possible.""" - - trans_repouris = [] - # we just use the proxies for now, but in future, we may want - # other per-origin/mirror properties - if repouri.proxies: - for p in repouri.proxies: - t = TransportRepoURI(repouri.uri, - priority=repouri.priority, - ssl_cert=repouri.ssl_cert, - ssl_key=repouri.ssl_key, - system=repouri.system, - trailing_slash=repouri.trailing_slash, - proxy=p.uri) - trans_repouris.append(t) - else: - trans_repouris.append(TransportRepoURI(repouri.uri, - priority=repouri.priority, - ssl_cert=repouri.ssl_cert, - ssl_key=repouri.ssl_key, - system=repouri.system, - trailing_slash=repouri.trailing_slash)) - return trans_repouris - - proxy = property(lambda self: self.__proxy, __set_proxy, None, - "The proxy that is used to access this repository." - "At runtime, a $http_proxy environnent variable might override this." - ) + self.mirrors = [] - runtime_proxy = property(__get_runtime_proxy, __set_runtime_proxy, None, - "The proxy to use to access this repository. This value checks" - "OS environment variables, and expands any $user:$password values.") + def reset_origins(self): + """Discards the current list of repository origins.""" + self.origins = [] -class Repository(object): - """Class representing a repository object. - - A repository object represents a location where clients can publish - and retrieve package content and/or metadata. It has the following - characteristics: - - - may have one or more origins (URIs) for publication and - retrieval of package metadata and content. - - - may have zero or more mirrors (URIs) for retrieval of package - content.""" - - # These properties are declared here so that they show up in the pydoc - # documentation as private, and for clarity in the property declarations - # found near the end of the class definition. 
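For orientation, here is roughly how the origin and mirror interfaces reformatted above are used. This is a minimal sketch only: the import path and the URIs are assumptions made for illustration and are not part of this change.

    # Sketch: module path and URIs below are assumed, not taken from this diff.
    from pkg.client.publisher import Repository

    repo = Repository(
        name="example",  # hypothetical repository name
        origins=["https://pkg.example.org/"],  # strings are coerced to RepositoryURI
    )
    repo.add_mirror("https://mirror.example.org/")

    # has_origin()/has_mirror() accept RepositoryURI objects or URI strings.
    print(repo.has_origin("https://pkg.example.org/"))

    # Re-adding the same mirror trips the dup_check() helper shown above and
    # raises DuplicateRepositoryMirror (DuplicateSyspubMirror for system URIs).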
- __collection_type = None - __legal_uris = [] - __mirrors = [] - __origins = [] - __refresh_seconds = None - __registration_uri = None - __related_uris = [] - __sort_policy = URI_SORT_PRIORITY - - # Used to store the id of the original object this one was copied - # from during __copy__. - _source_object_id = None - - name = None - description = None - registered = False - - def __init__(self, collection_type=REPO_CTYPE_CORE, description=None, - legal_uris=None, mirrors=None, name=None, origins=None, - refresh_seconds=None, registered=False, registration_uri=None, - related_uris=None, sort_policy=URI_SORT_PRIORITY): - """Initializes a repository object. - - 'collection_type' is an optional constant value indicating the - type of packages in the repository. - - 'description' is an optional string value containing a - descriptive paragraph for the repository. - - 'legal_uris' should be a list of RepositoryURI objects or URI - strings indicating where licensing, legal, and terms of service - information for the repository can be found. - - 'mirrors' is an optional list of RepositoryURI objects or URI - strings indicating where package content can be retrieved. - - 'name' is an optional, short, descriptive name for the - repository. - - 'origins' should be a list of RepositoryURI objects or URI - strings indicating where package metadata can be retrieved. - - 'refresh_seconds' is an optional integer value indicating the - number of seconds clients should wait before refreshing cached - repository catalog or repository metadata information. - - 'registered' is an optional boolean value indicating whether - a client has registered with the repository's publisher. - - 'registration_uri' is an optional RepositoryURI object or a URI - string indicating a location clients can use to register or - obtain credentials needed to access the repository. - - 'related_uris' is an optional list of RepositoryURI objects or a - list of URI strings indicating the location of related - repositories that a client may be interested in. - - 'sort_policy' is an optional constant value indicating how - legal_uris, mirrors, origins, and related_uris should be - sorted.""" - - # Note that the properties set here are intentionally lacking - # the '__' prefix which means assignment will occur using the - # get/set methods declared for the property near the end of - # the class definition. - - # Must be set first so that it will apply to attributes set - # afterwards. 
- self.sort_policy = sort_policy - - self.collection_type = collection_type - self.description = description - self.legal_uris = legal_uris - self.mirrors = mirrors - self.name = name - self.origins = origins - self.refresh_seconds = refresh_seconds - self.registered = registered - self.registration_uri = registration_uri - self.related_uris = related_uris - - def __add_uri(self, attr, uri, dup_check=None, priority=None, - ssl_cert=None, ssl_key=None, trailing_slash=True): - if not isinstance(uri, RepositoryURI): - uri = RepositoryURI(uri, priority=priority, - ssl_cert=ssl_cert, ssl_key=ssl_key, - trailing_slash=trailing_slash) - - if dup_check: - dup_check(uri) - - ulist = getattr(self, attr) - ulist.append(uri) - ulist.sort(key=URI_SORT_POLICIES[self.__sort_policy]) - - def __copy__(self): - cluris = [copy.copy(u) for u in self.legal_uris] - cmirrors = [copy.copy(u) for u in self.mirrors] - cruris = [copy.copy(u) for u in self.related_uris] - corigins = [copy.copy(u) for u in self.origins] - - repo = Repository(collection_type=self.collection_type, - description=self.description, - legal_uris=cluris, - mirrors=cmirrors, name=self.name, - origins=corigins, - refresh_seconds=self.refresh_seconds, - registered=self.registered, - registration_uri=copy.copy(self.registration_uri), - related_uris=cruris) - repo._source_object_id = id(self) - return repo - - def __replace_uris(self, attr, value, trailing_slash=True): - if value is None: - value = [] - if not isinstance(value, list): - raise api_errors.BadRepositoryAttributeValue(attr, - value=value) - uris = [] - for u in value: - if not isinstance(u, RepositoryURI): - u = RepositoryURI(u, - trailing_slash=trailing_slash) - elif trailing_slash: - u.uri = misc.url_affix_trailing_slash(u.uri) - uris.append(u) - uris.sort(key=URI_SORT_POLICIES[self.__sort_policy]) - return uris - - def __set_collection_type(self, value): - if value not in REPO_COLLECTION_TYPES: - raise api_errors.BadRepositoryCollectionType(value) - self.__collection_type = value - - def __set_legal_uris(self, value): - self.__legal_uris = self.__replace_uris("legal_uris", value, - trailing_slash=False) - - def __set_mirrors(self, value): - self.__mirrors = self.__replace_uris("mirrors", value) - - def __set_origins(self, value): - self.__origins = self.__replace_uris("origins", value) - - def __set_registration_uri(self, value): - if value and not isinstance(value, RepositoryURI): - value = RepositoryURI(value, trailing_slash=False) - self.__registration_uri = value - - def __set_related_uris(self, value): - self.__related_uris = self.__replace_uris("related_uris", - value, trailing_slash=False) - - def __set_refresh_seconds(self, value): - if value is not None: - try: - value = int(value) - except (TypeError, ValueError): - raise api_errors.BadRepositoryAttributeValue( - "refresh_seconds", value=value) - if value < 0: - raise api_errors.BadRepositoryAttributeValue( - "refresh_seconds", value=value) - self.__refresh_seconds = value - - def __set_sort_policy(self, value): - if value not in URI_SORT_POLICIES: - raise api_errors.BadRepositoryURISortPolicy(value) - self.__sort_policy = value - - def add_legal_uri(self, uri, priority=None, ssl_cert=None, - ssl_key=None): - """Adds the specified legal URI to the repository. - - 'uri' can be a RepositoryURI object or a URI string. 
If - it is a RepositoryURI object, all other parameters will be - ignored.""" - - self.__add_uri("legal_uris", uri, priority=priority, - ssl_cert=ssl_cert, ssl_key=ssl_key, trailing_slash=False) - - def add_mirror(self, mirror, priority=None, ssl_cert=None, - ssl_key=None): - """Adds the specified mirror to the repository. - - 'mirror' can be a RepositoryURI object or a URI string. If - it is a RepositoryURI object, all other parameters will be - ignored.""" - - def dup_check(mirror): - if self.has_mirror(mirror): - o = self.get_mirror(mirror) - if o.system: - raise api_errors.DuplicateSyspubMirror( - mirror) - raise api_errors.DuplicateRepositoryMirror( - mirror) - - self.__add_uri("mirrors", mirror, dup_check=dup_check, - priority=priority, ssl_cert=ssl_cert, ssl_key=ssl_key) - - def add_origin(self, origin, priority=None, ssl_cert=None, - ssl_key=None): - """Adds the specified origin to the repository. - - 'origin' can be a RepositoryURI object or a URI string. If - it is a RepositoryURI object, all other parameters will be - ignored.""" - - def dup_check(origin): - if self.has_origin(origin): - o = self.get_origin(origin) - if o.system: - raise api_errors.DuplicateSyspubOrigin( - origin) - raise api_errors.DuplicateRepositoryOrigin( - origin) - - self.__add_uri("origins", origin, dup_check=dup_check, - priority=priority, ssl_cert=ssl_cert, ssl_key=ssl_key) - - def add_related_uri(self, uri, priority=None, ssl_cert=None, - ssl_key=None): - """Adds the specified related URI to the repository. - - 'uri' can be a RepositoryURI object or a URI string. If - it is a RepositoryURI object, all other parameters will be - ignored.""" - - self.__add_uri("related_uris", uri, priority=priority, - ssl_cert=ssl_cert, ssl_key=ssl_key, trailing_slash=False) - - def get_mirror(self, mirror): - """Returns a RepositoryURI object representing the mirror - that matches 'mirror'. - - 'mirror' can be a RepositoryURI object or a URI string.""" - - if not isinstance(mirror, RepositoryURI): - mirror = misc.url_affix_trailing_slash(mirror) - for m in self.mirrors: - if mirror == m.uri: - return m - raise api_errors.UnknownRepositoryMirror(mirror) - - def get_origin(self, origin): - """Returns a RepositoryURI object representing the origin - that matches 'origin'. - - 'origin' can be a RepositoryURI object or a URI string.""" - - if not isinstance(origin, RepositoryURI): - origin = misc.url_affix_trailing_slash(origin) - for o in self.origins: - if origin == o.uri: - return o - raise api_errors.UnknownRepositoryOrigin(origin) - - def has_mirror(self, mirror): - """Returns a boolean value indicating whether a matching - 'mirror' exists for the repository. - - 'mirror' can be a RepositoryURI object or a URI string.""" - - if not isinstance(mirror, RepositoryURI): - mirror = RepositoryURI(mirror) - return mirror in self.mirrors - - def has_origin(self, origin): - """Returns a boolean value indicating whether a matching - 'origin' exists for the repository. - - 'origin' can be a RepositoryURI object or a URI string.""" - - if not isinstance(origin, RepositoryURI): - origin = RepositoryURI(origin) - return origin in self.origins - - def remove_legal_uri(self, uri): - """Removes the legal URI matching 'uri' from the repository. - - 'uri' can be a RepositoryURI object or a URI string.""" - - for i, m in enumerate(self.legal_uris): - if uri == m.uri: - # Immediate return as the index into the array - # changes with each removal. 
- del self.legal_uris[i] - return - raise api_errors.UnknownLegalURI(uri) - - def remove_mirror(self, mirror): - """Removes the mirror matching 'mirror' from the repository. - - 'mirror' can be a RepositoryURI object or a URI string.""" - - if not isinstance(mirror, RepositoryURI): - mirror = misc.url_affix_trailing_slash(mirror) - for i, m in enumerate(self.mirrors): - if mirror == m.uri: - if m.system: - api_errors.RemoveSyspubMirror( - mirror.uri) - # Immediate return as the index into the array - # changes with each removal. - del self.mirrors[i] - return - raise api_errors.UnknownRepositoryMirror(mirror) - - def remove_origin(self, origin): - """Removes the origin matching 'origin' from the repository. - - 'origin' can be a RepositoryURI object or a URI string.""" - - if not isinstance(origin, RepositoryURI): - origin = RepositoryURI(origin) - for i, o in enumerate(self.origins): - if origin == o.uri: - if o.system: - raise api_errors.RemoveSyspubOrigin( - origin.uri) - # Immediate return as the index into the array - # changes with each removal. - del self.origins[i] - return - raise api_errors.UnknownRepositoryOrigin(origin) - - def remove_related_uri(self, uri): - """Removes the related URI matching 'uri' from the repository. - - 'uri' can be a RepositoryURI object or a URI string.""" - - for i, m in enumerate(self.related_uris): - if uri == m.uri: - # Immediate return as the index into the array - # changes with each removal. - del self.related_uris[i] - return - raise api_errors.UnknownRelatedURI(uri) - - def update_mirror(self, mirror, priority=None, ssl_cert=None, - ssl_key=None): - """Updates an existing mirror object matching 'mirror'. - - 'mirror' can be a RepositoryURI object or a URI string. - - This method is deprecated, and may be removed in future API - versions.""" - - if not isinstance(mirror, RepositoryURI): - mirror = RepositoryURI(mirror, priority=priority, - ssl_cert=ssl_cert, ssl_key=ssl_key) - - target = self.get_mirror(mirror) - target.priority = mirror.priority - target.ssl_cert = mirror.ssl_cert - target.ssl_key = mirror.ssl_key - target.proxies = mirror.proxies - self.mirrors.sort(key=URI_SORT_POLICIES[self.__sort_policy]) - - def update_origin(self, origin, priority=None, ssl_cert=None, - ssl_key=None): - """Updates an existing origin object matching 'origin'. - - 'origin' can be a RepositoryURI object or a URI string. - - This method is deprecated, and may be removed in future API - versions.""" - - if not isinstance(origin, RepositoryURI): - origin = RepositoryURI(origin, priority=priority, - ssl_cert=ssl_cert, ssl_key=ssl_key) - - target = self.get_origin(origin) - target.priority = origin.priority - target.ssl_cert = origin.ssl_cert - target.ssl_key = origin.ssl_key - target.proxies = origin.proxies - self.origins.sort(key=URI_SORT_POLICIES[self.__sort_policy]) - - def reset_mirrors(self): - """Discards the current list of repository mirrors.""" - - self.mirrors = [] - - def reset_origins(self): - """Discards the current list of repository origins.""" - - self.origins = [] - - collection_type = property(lambda self: self.__collection_type, - __set_collection_type, None, - """A constant value indicating the type of packages in the + collection_type = property( + lambda self: self.__collection_type, + __set_collection_type, + None, + """A constant value indicating the type of packages in the repository. 
The following collection types are recognized: REPO_CTYPE_CORE @@ -1131,417 +1303,481 @@ def reset_origins(self): REPO_CTYPE_SUPPLEMENTAL The "supplemental" type indicates that the repository contains packages that rely on or are intended to be - used with packages located in another repository.""") - - legal_uris = property(lambda self: self.__legal_uris, - __set_legal_uris, None, - """A list of RepositoryURI objects indicating where licensing, + used with packages located in another repository.""", + ) + + legal_uris = property( + lambda self: self.__legal_uris, + __set_legal_uris, + None, + """A list of RepositoryURI objects indicating where licensing, legal, and terms of service information for the repository can be - found.""") - - mirrors = property(lambda self: self.__mirrors, __set_mirrors, None, - """A list of RepositoryURI objects indicating where package content + found.""", + ) + + mirrors = property( + lambda self: self.__mirrors, + __set_mirrors, + None, + """A list of RepositoryURI objects indicating where package content can be retrieved. If any value in the list provided is a URI - string, it will be replaced with a RepositoryURI object.""") - - origins = property(lambda self: self.__origins, __set_origins, None, - """A list of RepositoryURI objects indicating where package content + string, it will be replaced with a RepositoryURI object.""", + ) + + origins = property( + lambda self: self.__origins, + __set_origins, + None, + """A list of RepositoryURI objects indicating where package content can be retrieved. If any value in the list provided is a URI - string, it will be replaced with a RepositoryURI object.""") - - registration_uri = property(lambda self: self.__registration_uri, - __set_registration_uri, None, - """A RepositoryURI object indicating a location clients can use to + string, it will be replaced with a RepositoryURI object.""", + ) + + registration_uri = property( + lambda self: self.__registration_uri, + __set_registration_uri, + None, + """A RepositoryURI object indicating a location clients can use to register or obtain credentials needed to access the repository. If the value provided is a URI string, it will be replaced with a - RepositoryURI object.""") - - related_uris = property(lambda self: self.__related_uris, - __set_related_uris, None, - """A list of RepositoryURI objects indicating the location of + RepositoryURI object.""", + ) + + related_uris = property( + lambda self: self.__related_uris, + __set_related_uris, + None, + """A list of RepositoryURI objects indicating the location of related repositories that a client may be interested in. If any value in the list provided is a URI string, it will be replaced with - a RepositoryURI object.""") - - refresh_seconds = property(lambda self: self.__refresh_seconds, - __set_refresh_seconds, None, - """An integer value indicating the number of seconds clients should + a RepositoryURI object.""", + ) + + refresh_seconds = property( + lambda self: self.__refresh_seconds, + __set_refresh_seconds, + None, + """An integer value indicating the number of seconds clients should wait before refreshing cached repository metadata information. 
A value of None indicates that refreshes should be performed at the - client's discretion.""") - - sort_policy = property(lambda self: self.__sort_policy, - __set_sort_policy, None, - """A constant value indicating how legal_uris, mirrors, origins, and + client's discretion.""", + ) + + sort_policy = property( + lambda self: self.__sort_policy, + __set_sort_policy, + None, + """A constant value indicating how legal_uris, mirrors, origins, and related_uris should be sorted. The following policies are recognized: URI_SORT_PRIORITY The "priority" policy indicate that URIs should be sorted according to the value of their priority - attribute.""") + attribute.""", + ) class Publisher(object): - """Class representing a publisher object and a set of interfaces to set - and retrieve its information. - - A publisher is a forward or reverse domain name identifying a source - (e.g. "publisher") of packages.""" - - # These properties are declared here so that they show up in the pydoc - # documentation as private, and for clarity in the property declarations - # found near the end of the class definition. - _catalog = None - __alias = None - __client_uuid = None - __client_uuid_time = None - __disabled = False - __meta_root = None - __origin_root = None - __prefix = None - __repository = None - __sticky = True - transport = None - - # Used to store the id of the original object this one was copied - # from during __copy__. - _source_object_id = None - - - def __init__(self, prefix, alias=None, catalog=None, client_uuid=None, - disabled=False, meta_root=None, repository=None, - transport=None, sticky=True, props=None, revoked_ca_certs=EmptyI, - approved_ca_certs=EmptyI, sys_pub=False, client_uuid_time=None): - """Initialize a new publisher object. - - 'catalog' is an optional Catalog object to use in place of - retrieving one from the publisher's meta_root. This option - may only be used when meta_root is not provided. - """ - - assert not (catalog and meta_root) - - if (client_uuid is None or client_uuid_time is None - or not len(client_uuid_time)): - self.reset_client_uuid() - else: - self.__client_uuid = client_uuid - self.__client_uuid_time = client_uuid_time - - self.sys_pub = False - - # Note that the properties set here are intentionally lacking - # the '__' prefix which means assignment will occur using the - # get/set methods declared for the property near the end of - # the class definition. - self.alias = alias - self.disabled = disabled - self.prefix = prefix - self.transport = transport - self.meta_root = meta_root - self.sticky = sticky - - - self.__sig_policy = None - self.__delay_validation = False - - self.__properties = {} - - # Writing out an EmptyI to a config file and reading it back - # in doesn't work correctly at the moment, but reading and - # writing an empty list does. So if intermediate_certs is empty, - # make sure it's stored as an empty list. - # - # The relevant implementation is probably the line which - # strips ][ from the input in imageconfig.read_list. - if revoked_ca_certs: - self.revoked_ca_certs = revoked_ca_certs - else: - self.revoked_ca_certs = [] - - if approved_ca_certs: - self.approved_ca_certs = approved_ca_certs - else: - self.approved_ca_certs = [] - - if props: - self.properties.update(props) - - self.ca_dict = None - - if repository: - self.repository = repository - self.sys_pub = sys_pub - - # A dictionary to story the mapping for subject -> certificate - # for those certificates we couldn't store on disk. 
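As a small illustration of the sort_policy property above: the URI lists are re-sorted after every change, using each URI's priority attribute under the default URI_SORT_PRIORITY policy. A sketch with invented URIs; the import path is assumed.

    # Sketch: the URIs and import path are illustrative assumptions.
    from pkg.client.publisher import Repository

    repo = Repository()
    repo.add_origin("https://backup.example.org/", priority=2)
    repo.add_origin("https://primary.example.org/", priority=1)

    # Each add_origin() call re-sorts repo.origins by the priority attribute,
    # so the resulting order does not depend on insertion order.
    print([(o.uri, o.priority) for o in repo.origins])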
- self.__issuers = {} - - # Must be done last. - self._catalog = catalog - - def __lt__(self, other): - if other is None: - return False - if isinstance(other, Publisher): - return self.prefix < other.prefix - return self.prefix < other - - def __gt__(self, other): - if other is None: - return True - if isinstance(other, Publisher): - return self.prefix > other.prefix - return self.prefix > other - - def __le__(self, other): - return not self > other - - def __ge__(self, other): - return not self < other - - @staticmethod - def __contains__(key): - """Supports deprecated compatibility interface.""" - - return key in ("client_uuid", "disabled", "mirrors", "origin", - "prefix", "ssl_cert", "ssl_key") - - def __copy__(self): - selected = None - pub = Publisher(self.__prefix, alias=self.__alias, - client_uuid=self.__client_uuid, disabled=self.__disabled, - meta_root=self.meta_root, - repository=copy.copy(self.repository), - transport=self.transport, sticky=self.__sticky, - props=self.properties, - revoked_ca_certs=self.revoked_ca_certs, - approved_ca_certs=self.approved_ca_certs, - sys_pub=self.sys_pub, - client_uuid_time=self.__client_uuid_time) - pub._catalog = self._catalog - pub._source_object_id = id(self) - return pub - - def __eq__(self, other): - if isinstance(other, Publisher): - return self.prefix == other.prefix - if isinstance(other, str): - return self.prefix == other - return False - - __hash__ = object.__hash__ - - def __getitem__(self, key): - """Deprecated compatibility interface allowing publisher - attributes to be read as pub["attribute"].""" - - if key == "client_uuid": - return self.__client_uuid - if key == "disabled": - return self.__disabled - if key == "prefix": - return self.__prefix - - repo = self.repository - if key == "mirrors": - return [str(m) for m in repo.mirrors] - if key == "origin": - if not repo.origins[0]: - return None - return repo.origins[0].uri - if key == "ssl_cert": - if not repo.origins[0]: - return None - return repo.origins[0].ssl_cert - if key == "ssl_key": - if not repo.origins[0]: - return None - return repo.origins[0].ssl_key - - def __get_last_refreshed(self): - if not self.meta_root: - return None - - lcfile = os.path.join(self.meta_root, "last_refreshed") - try: - mod_time = os.stat(lcfile).st_mtime - except EnvironmentError as e: - if e.errno == errno.ENOENT: - return None - raise - return dt.datetime.utcfromtimestamp(mod_time) - - def __get_nochild(self): - try: - return self.__properties['nochild'] - except KeyError: - return False - - def __ne__(self, other): - if isinstance(other, Publisher): - return self.prefix != other.prefix - if isinstance(other, str): - return self.prefix != other - return True - - def __set_alias(self, value): - if self.sys_pub: - raise api_errors.ModifyingSyspubException( - "Cannot set the alias of a system publisher") - # Aliases must comply with the same restrictions that prefixes - # have as they are intended to be useable in any case where - # a prefix may be used. 
- if value is not None and value != "" and \ - not misc.valid_pub_prefix(value): - raise api_errors.BadPublisherAlias(value) - self.__alias = value - - def __set_disabled(self, disabled): - if self.sys_pub: - raise api_errors.ModifyingSyspubException(_("Cannot " - "enable or disable a system publisher")) - - if disabled: - self.__disabled = True - else: - self.__disabled = False - - def __set_last_refreshed(self, value): - if not self.meta_root: - return + """Class representing a publisher object and a set of interfaces to set + and retrieve its information. + + A publisher is a forward or reverse domain name identifying a source + (e.g. "publisher") of packages.""" + + # These properties are declared here so that they show up in the pydoc + # documentation as private, and for clarity in the property declarations + # found near the end of the class definition. + _catalog = None + __alias = None + __client_uuid = None + __client_uuid_time = None + __disabled = False + __meta_root = None + __origin_root = None + __prefix = None + __repository = None + __sticky = True + transport = None + + # Used to store the id of the original object this one was copied + # from during __copy__. + _source_object_id = None + + def __init__( + self, + prefix, + alias=None, + catalog=None, + client_uuid=None, + disabled=False, + meta_root=None, + repository=None, + transport=None, + sticky=True, + props=None, + revoked_ca_certs=EmptyI, + approved_ca_certs=EmptyI, + sys_pub=False, + client_uuid_time=None, + ): + """Initialize a new publisher object. + + 'catalog' is an optional Catalog object to use in place of + retrieving one from the publisher's meta_root. This option + may only be used when meta_root is not provided. + """ + + assert not (catalog and meta_root) + + if ( + client_uuid is None + or client_uuid_time is None + or not len(client_uuid_time) + ): + self.reset_client_uuid() + else: + self.__client_uuid = client_uuid + self.__client_uuid_time = client_uuid_time + + self.sys_pub = False + + # Note that the properties set here are intentionally lacking + # the '__' prefix which means assignment will occur using the + # get/set methods declared for the property near the end of + # the class definition. + self.alias = alias + self.disabled = disabled + self.prefix = prefix + self.transport = transport + self.meta_root = meta_root + self.sticky = sticky + + self.__sig_policy = None + self.__delay_validation = False + + self.__properties = {} + + # Writing out an EmptyI to a config file and reading it back + # in doesn't work correctly at the moment, but reading and + # writing an empty list does. So if intermediate_certs is empty, + # make sure it's stored as an empty list. + # + # The relevant implementation is probably the line which + # strips ][ from the input in imageconfig.read_list. + if revoked_ca_certs: + self.revoked_ca_certs = revoked_ca_certs + else: + self.revoked_ca_certs = [] + + if approved_ca_certs: + self.approved_ca_certs = approved_ca_certs + else: + self.approved_ca_certs = [] + + if props: + self.properties.update(props) + + self.ca_dict = None + + if repository: + self.repository = repository + self.sys_pub = sys_pub + + # A dictionary to story the mapping for subject -> certificate + # for those certificates we couldn't store on disk. + self.__issuers = {} + + # Must be done last. 
+ self._catalog = catalog + + def __lt__(self, other): + if other is None: + return False + if isinstance(other, Publisher): + return self.prefix < other.prefix + return self.prefix < other + + def __gt__(self, other): + if other is None: + return True + if isinstance(other, Publisher): + return self.prefix > other.prefix + return self.prefix > other + + def __le__(self, other): + return not self > other + + def __ge__(self, other): + return not self < other + + @staticmethod + def __contains__(key): + """Supports deprecated compatibility interface.""" + + return key in ( + "client_uuid", + "disabled", + "mirrors", + "origin", + "prefix", + "ssl_cert", + "ssl_key", + ) + + def __copy__(self): + selected = None + pub = Publisher( + self.__prefix, + alias=self.__alias, + client_uuid=self.__client_uuid, + disabled=self.__disabled, + meta_root=self.meta_root, + repository=copy.copy(self.repository), + transport=self.transport, + sticky=self.__sticky, + props=self.properties, + revoked_ca_certs=self.revoked_ca_certs, + approved_ca_certs=self.approved_ca_certs, + sys_pub=self.sys_pub, + client_uuid_time=self.__client_uuid_time, + ) + pub._catalog = self._catalog + pub._source_object_id = id(self) + return pub + + def __eq__(self, other): + if isinstance(other, Publisher): + return self.prefix == other.prefix + if isinstance(other, str): + return self.prefix == other + return False + + __hash__ = object.__hash__ + + def __getitem__(self, key): + """Deprecated compatibility interface allowing publisher + attributes to be read as pub["attribute"].""" + + if key == "client_uuid": + return self.__client_uuid + if key == "disabled": + return self.__disabled + if key == "prefix": + return self.__prefix + + repo = self.repository + if key == "mirrors": + return [str(m) for m in repo.mirrors] + if key == "origin": + if not repo.origins[0]: + return None + return repo.origins[0].uri + if key == "ssl_cert": + if not repo.origins[0]: + return None + return repo.origins[0].ssl_cert + if key == "ssl_key": + if not repo.origins[0]: + return None + return repo.origins[0].ssl_key + + def __get_last_refreshed(self): + if not self.meta_root: + return None + + lcfile = os.path.join(self.meta_root, "last_refreshed") + try: + mod_time = os.stat(lcfile).st_mtime + except EnvironmentError as e: + if e.errno == errno.ENOENT: + return None + raise + return dt.datetime.utcfromtimestamp(mod_time) + + def __get_nochild(self): + try: + return self.__properties["nochild"] + except KeyError: + return False + + def __ne__(self, other): + if isinstance(other, Publisher): + return self.prefix != other.prefix + if isinstance(other, str): + return self.prefix != other + return True + + def __set_alias(self, value): + if self.sys_pub: + raise api_errors.ModifyingSyspubException( + "Cannot set the alias of a system publisher" + ) + # Aliases must comply with the same restrictions that prefixes + # have as they are intended to be useable in any case where + # a prefix may be used. 
+ if ( + value is not None + and value != "" + and not misc.valid_pub_prefix(value) + ): + raise api_errors.BadPublisherAlias(value) + self.__alias = value + + def __set_disabled(self, disabled): + if self.sys_pub: + raise api_errors.ModifyingSyspubException( + _("Cannot " "enable or disable a system publisher") + ) - if value is not None and not isinstance(value, dt.datetime): - raise api_errors.BadRepositoryAttributeValue( - "last_refreshed", value=value) + if disabled: + self.__disabled = True + else: + self.__disabled = False - lcfile = os.path.join(self.meta_root, "last_refreshed") - if not value: - # If no value was provided, attempt to remove the - # tracking file. - try: - portable.remove(lcfile) - except EnvironmentError as e: - # If the file can't be removed due to - # permissions, a read-only filesystem, or - # because it doesn't exist, continue on. - if e.errno not in (errno.ENOENT, errno.EACCES, - errno.EROFS): - raise - return - - def create_tracker(): - try: - # If the file is a symlink we catch an - # exception and do not update the file. - fd = os.open(lcfile, - os.O_WRONLY|os.O_NOFOLLOW|os.O_CREAT) - os.write(fd, misc.force_bytes("{0}\n".format( - misc.time_to_timestamp( - calendar.timegm(value.utctimetuple()))))) - os.close(fd) - except EnvironmentError as e: - if e.errno == errno.ELOOP: - raise api_errors.UnexpectedLinkError( - os.path.dirname(lcfile), - os.path.basename(lcfile), - e.errno) - # If the file can't be written due to - # permissions or because the filesystem is - # read-only, continue on. - if e.errno not in (errno.EACCES, errno.EROFS): - raise - try: - # If a time was provided, write out a special file that - # can be used to track the information with the actual - # time (in UTC) contained within. - create_tracker() - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise + def __set_last_refreshed(self, value): + if not self.meta_root: + return - # Assume meta_root doesn't exist and create it. - try: - self.create_meta_root() - except api_errors.PermissionsException: - # If the directory can't be created due to - # permissions, move on. - pass - except EnvironmentError as e: - # If the directory can't be created due to a - # read-only filesystem, move on. - if e.errno != errno.EROFS: - raise - else: - # Try one last time. 
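For context on the comparison operators and the deprecated dict-style interface reformatted above, a rough usage sketch; the prefixes and URIs are hypothetical and the import path is assumed.

    # Sketch: names below are invented for illustration only.
    from pkg.client.publisher import Publisher, Repository

    repo = Repository(origins=["https://pkg.example.org/"])
    pub = Publisher("example.org", repository=repo)

    # Publishers compare and sort by prefix, including against bare strings.
    print(pub == "example.org")
    print(sorted([pub, Publisher("another.org")]))

    # Deprecated mapping-style reads still work and delegate to the selected
    # repository; "origin" returns the first origin's URI.
    print(pub["origin"])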
- create_tracker() - - def __set_meta_root(self, pathname): - if pathname: - pathname = os.path.abspath(pathname) - self.__meta_root = pathname - if self._catalog: - self._catalog.meta_root = self.catalog_root - if self.__meta_root: - self.__origin_root = os.path.join(self.__meta_root, - "origins") - self.cert_root = os.path.join(self.__meta_root, "certs") - self.__subj_root = os.path.join(self.cert_root, - "subject_hashes") - self.__crl_root = os.path.join(self.cert_root, "crls") - - def __set_prefix(self, prefix): - if not misc.valid_pub_prefix(prefix): - raise api_errors.BadPublisherPrefix(prefix) - self.__prefix = prefix - - def __set_repository(self, value): - if not isinstance(value, Repository): - raise api_errors.UnknownRepository(value) - self.__repository = value - self._catalog = None + if value is not None and not isinstance(value, dt.datetime): + raise api_errors.BadRepositoryAttributeValue( + "last_refreshed", value=value + ) - def __set_client_uuid(self, value): - self.__client_uuid = value - - def __set_client_uuid_time(self, value): - self.__client_uuid_time = value - - def __set_stickiness(self, value): - if self.sys_pub: - raise api_errors.ModifyingSyspubException(_("Cannot " - "change the stickiness of a system publisher")) - self.__sticky = bool(value) - - def __str__(self): - return self.prefix - - def __validate_metadata(self, croot, repo): - """Private helper function to check the publisher's metadata - for configuration or other issues and log appropriate warnings - or errors. Currently only checks catalog metadata.""" - - c = pkg.catalog.Catalog(meta_root=croot, read_only=True) - if not c.exists: - # Nothing to validate. - return - if not c.version > 0: - # Validation doesn't apply. - return - if not c.package_count: - # Nothing to do. - return - - # XXX For now, perform this check using the catalog data. - # In the future, it should be done using the output of the - # publisher/0 operation. - pubs = c.publishers() - - if self.prefix not in pubs: - origins = repo.origins - origin = origins[0] - logger.error(_(""" + lcfile = os.path.join(self.meta_root, "last_refreshed") + if not value: + # If no value was provided, attempt to remove the + # tracking file. + try: + portable.remove(lcfile) + except EnvironmentError as e: + # If the file can't be removed due to + # permissions, a read-only filesystem, or + # because it doesn't exist, continue on. + if e.errno not in (errno.ENOENT, errno.EACCES, errno.EROFS): + raise + return + + def create_tracker(): + try: + # If the file is a symlink we catch an + # exception and do not update the file. + fd = os.open(lcfile, os.O_WRONLY | os.O_NOFOLLOW | os.O_CREAT) + os.write( + fd, + misc.force_bytes( + "{0}\n".format( + misc.time_to_timestamp( + calendar.timegm(value.utctimetuple()) + ) + ) + ), + ) + os.close(fd) + except EnvironmentError as e: + if e.errno == errno.ELOOP: + raise api_errors.UnexpectedLinkError( + os.path.dirname(lcfile), + os.path.basename(lcfile), + e.errno, + ) + # If the file can't be written due to + # permissions or because the filesystem is + # read-only, continue on. + if e.errno not in (errno.EACCES, errno.EROFS): + raise + + try: + # If a time was provided, write out a special file that + # can be used to track the information with the actual + # time (in UTC) contained within. + create_tracker() + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + + # Assume meta_root doesn't exist and create it. 
+ try: + self.create_meta_root() + except api_errors.PermissionsException: + # If the directory can't be created due to + # permissions, move on. + pass + except EnvironmentError as e: + # If the directory can't be created due to a + # read-only filesystem, move on. + if e.errno != errno.EROFS: + raise + else: + # Try one last time. + create_tracker() + + def __set_meta_root(self, pathname): + if pathname: + pathname = os.path.abspath(pathname) + self.__meta_root = pathname + if self._catalog: + self._catalog.meta_root = self.catalog_root + if self.__meta_root: + self.__origin_root = os.path.join(self.__meta_root, "origins") + self.cert_root = os.path.join(self.__meta_root, "certs") + self.__subj_root = os.path.join(self.cert_root, "subject_hashes") + self.__crl_root = os.path.join(self.cert_root, "crls") + + def __set_prefix(self, prefix): + if not misc.valid_pub_prefix(prefix): + raise api_errors.BadPublisherPrefix(prefix) + self.__prefix = prefix + + def __set_repository(self, value): + if not isinstance(value, Repository): + raise api_errors.UnknownRepository(value) + self.__repository = value + self._catalog = None + + def __set_client_uuid(self, value): + self.__client_uuid = value + + def __set_client_uuid_time(self, value): + self.__client_uuid_time = value + + def __set_stickiness(self, value): + if self.sys_pub: + raise api_errors.ModifyingSyspubException( + _("Cannot " "change the stickiness of a system publisher") + ) + self.__sticky = bool(value) + + def __str__(self): + return self.prefix + + def __validate_metadata(self, croot, repo): + """Private helper function to check the publisher's metadata + for configuration or other issues and log appropriate warnings + or errors. Currently only checks catalog metadata.""" + + c = pkg.catalog.Catalog(meta_root=croot, read_only=True) + if not c.exists: + # Nothing to validate. + return + if not c.version > 0: + # Validation doesn't apply. + return + if not c.package_count: + # Nothing to do. + return + + # XXX For now, perform this check using the catalog data. + # In the future, it should be done using the output of the + # publisher/0 operation. + pubs = c.publishers() + + if self.prefix not in pubs: + origins = repo.origins + origin = origins[0] + logger.error( + _( + """ Unable to retrieve package data for publisher '{prefix}' from one of the following origin(s): @@ -1549,1778 +1785,1939 @@ def __validate_metadata(self, croot, repo): The catalog retrieved from one of the origin(s) listed above only contains package data for: {pubs}. -""").format(origins="\n".join(str(o) for o in origins), prefix=self.prefix, - pubs=", ".join(pubs))) +""" + ).format( + origins="\n".join(str(o) for o in origins), + prefix=self.prefix, + pubs=", ".join(pubs), + ) + ) - if global_settings.client_name != "pkg": - logger.error(_("""\ + if global_settings.client_name != "pkg": + logger.error( + _( + """\ This is either a result of invalid origin information being provided for publisher '{0}', or because the wrong publisher name was provided when this publisher was added. -""").format(self.prefix)) - # Remaining messages are for pkg client only. - return - - logger.error(_("""\ +""" + ).format(self.prefix) + ) + # Remaining messages are for pkg client only. + return + + logger.error( + _( + """\ To resolve this issue, correct the origin information provided for publisher '{prefix}' using the pkg set-publisher subcommand, or re-add the publisher using the correct name and remove the '{prefix}' publisher. 
-""").format(prefix=self.prefix)) +""" + ).format(prefix=self.prefix) + ) - if len(pubs) == 1: - logger.warning(_("""\ + if len(pubs) == 1: + logger.warning( + _( + """\ To re-add this publisher with the correct name, execute the following commands as a privileged user: pkg set-publisher -P -g {origin} {pub} pkg unset-publisher {prefix} -""").format(origin=origin, prefix=self.prefix, pub=list(pubs)[0])) - return - - logger.warning(_("""\ +""" + ).format( + origin=origin, prefix=self.prefix, pub=list(pubs)[0] + ) + ) + return + + logger.warning( + _( + """\ The origin(s) listed above contain package data for more than one publisher, but this issue can likely be resolved by executing one of the following commands as a privileged user: -""")) +""" + ) + ) - for pfx in pubs: - logger.warning(_("pkg set-publisher -P -g " - "{origin} {pub}\n").format( - origin=origin, pub=pfx)) + for pfx in pubs: + logger.warning( + _("pkg set-publisher -P -g " "{origin} {pub}\n").format( + origin=origin, pub=pfx + ) + ) - logger.warning(_("""\ + logger.warning( + _( + """\ Afterwards, the old publisher should be removed by executing the following command as a privileged user: pkg unset-publisher {0} -""").format(self.prefix)) - - @property - def catalog(self): - """A reference to the Catalog object for the publisher's - selected repository, or None if available.""" - - if not self.meta_root: - if self._catalog: - return self._catalog - return None - - if not self._catalog: - croot = self.catalog_root - if not os.path.isdir(croot): - # Current meta_root structure is likely in - # a state of transition, so don't provide a - # meta_root. Assume that an empty catalog - # is desired instead. (This can happen during - # an image format upgrade.) - croot = None - self._catalog = pkg.catalog.Catalog( - meta_root=croot) - return self._catalog - - @property - def catalog_root(self): - """The absolute pathname of the directory containing the - Catalog data for the publisher, or None if meta_root is - not defined.""" - - if self.meta_root: - return os.path.join(self.meta_root, "catalog") - - def create_meta_root(self): - """Create the publisher's meta_root.""" - - if not self.meta_root: - raise api_errors.BadPublisherMetaRoot(self.meta_root, - operation="create_meta_root") +""" + ).format(self.prefix) + ) - for path in (self.meta_root, self.catalog_root): - try: - os.makedirs(path) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - elif e.errno != errno.EEXIST: - # If the path already exists, move on. - # Otherwise, raise the exception. - raise - # Optional roots not needed for all operations. - for path in (self.cert_root, self.__origin_root, - self.__subj_root, self.__crl_root): - try: - os.makedirs(path) - except EnvironmentError as e: - if e.errno in (errno.EACCES, errno.EROFS): - pass - elif e.errno != errno.EEXIST: - # If the path already exists, move on. - # Otherwise, raise the exception. - raise - - def get_origin_sets(self): - """Returns a list of Repository objects representing the unique - groups of origins available. Each group is based on the origins - that share identical package catalog data.""" - - if not self.repository or not self.repository.origins: - # Guard against failure for publishers with no - # transport information. - return [] - - if not self.meta_root or not os.path.exists(self.__origin_root): - # No way to identify unique sets. 
- return [self.repository] - - # Index origins by tuple of (catalog creation, catalog modified) - osets = collections.defaultdict(list) - - for origin, opath in self.__gen_origin_paths(): - cat = pkg.catalog.Catalog(meta_root=opath, - read_only=True) - if not cat.exists: - key = None - else: - key = (str(cat.created), str(cat.last_modified)) - osets[key].append(origin) - - # Now return a list of Repository objects (copies of the - # currently selected one) assigning each set of origins. - # Sort by index to ensure consistent ordering. - rval = [] - for k in sorted(osets): - nrepo = copy.copy(self.repository) - nrepo.origins = osets[k] - rval.append(nrepo) - - return rval - - def has_configuration(self): - """Returns whether this publisher has any configuration which - should prevent its removal.""" - - return bool(self.__repository.origins or - self.__repository.mirrors or self.__sig_policy or - self.approved_ca_certs or self.revoked_ca_certs) - - @property - def needs_refresh(self): - """A boolean value indicating whether the publisher's - metadata for the currently selected repository needs to be - refreshed.""" - - if not self.repository or not self.meta_root: - # Nowhere to obtain metadata from; this should rarely - # occur except during publisher initialization. - return False - - lc = self.last_refreshed - if not lc: - # There is no record of when the publisher metadata was - # last refreshed, so assume it should be refreshed now. - return True - - ts_now = time.time() - ts_last = calendar.timegm(lc.utctimetuple()) - - rs = self.repository.refresh_seconds - if not rs: - # There is no indicator of how often often publisher - # metadata should be refreshed, so assume it should be - # now. - return True - - if (ts_now - ts_last) >= rs: - # The number of seconds that has elapsed since the - # publisher metadata was last refreshed exceeds or - # equals the specified interval. - return True - return False - - def __get_origin_path(self, origin): - if not os.path.exists(self.__origin_root): - return - # A digest of the URI string is used here to attempt to avoid - # path length problems. In order for this image to interoperate - # with older clients, we must use sha-1 here. - return os.path.join(self.__origin_root, - hashlib.sha1(misc.force_bytes(origin.uri)).hexdigest()) - - def __gen_origin_paths(self): - if not os.path.exists(self.__origin_root): - return - for origin in self.repository.origins: - if not origin.disabled: - yield origin, self.__get_origin_path(origin) - - def __rebuild_catalog(self): - """Private helper function that builds publisher catalog based - on catalog from each origin.""" - - # First, remove catalogs for any origins that no longer exist or - # are disabled. - # We must interoperate with older clients, so force the use of - # sha-1 here. - ohashes = [ - hashlib.sha1(misc.force_bytes(o.uri)).hexdigest() - for o in self.repository.origins - if not o.disabled - ] - - removals = False - for entry in os.listdir(self.__origin_root): - opath = os.path.join(self.__origin_root, entry) - try: - if entry in ohashes: - continue - except Exception: - # Discard anything that isn't an origin. - pass - - # An origin was removed or disabled, so publisher should - # inform image to force image catalog rebuild. - removals = True - - # Not an origin or origin no longer exists; either way, - # it shouldn't exist here. 
- try: - if os.path.isdir(opath): - shutil.rmtree(opath) - else: - portable.remove(opath) - except EnvironmentError as e: - raise api_errors._convert_error(e) - - # if the catalog already exists on disk, is empty, and if - # no origins are configured or all origins are disabled, we're - # done. - if self.catalog.exists and \ - self.catalog.package_count == 0 and \ - (not self.repository.origins - or all(o.disabled for o in self.repository.origins)): - return removals - - # Discard existing catalog. - self.catalog.destroy() - self._catalog = None + @property + def catalog(self): + """A reference to the Catalog object for the publisher's + selected repository, or None if available.""" - # Ensure all old catalog files are removed. - for entry in os.listdir(self.catalog_root): - if entry == "attrs" or entry == "catalog" or \ - entry.startswith("catalog."): - try: - portable.remove(os.path.join( - self.catalog_root, entry)) - except EnvironmentError as e: - raise apx._convert_error(e) - - # If there's only one origin, then just symlink its catalog - # files into place. - # Symlinking includes updates for publication tools. - opaths = [entry for entry in self.__gen_origin_paths()] - if len(opaths) == 1: - opath = opaths[0][1] - for fname in os.listdir(opath): - if fname.startswith("catalog.") or \ - fname.startswith("update."): - src = os.path.join(opath, fname) - dest = os.path.join(self.catalog_root, - fname) - os.symlink(misc.relpath(src, - self.catalog_root), dest) - return removals - - # If there's more than one origin, then create a new catalog - # based on a composite of the catalogs for all origins. - ncat = pkg.catalog.Catalog(batch_mode=True, - meta_root=self.catalog_root, sign=False) - - # Mark all operations as occurring at this time. - op_time = dt.datetime.utcnow() - - for origin, opath in opaths: - src_cat = pkg.catalog.Catalog(meta_root=opath, - read_only=True) - for name in src_cat.parts: - spart = src_cat.get_part(name, must_exist=True) - if spart is None: - # Client hasn't retrieved this part. - continue - - npart = ncat.get_part(name) - base = name.startswith("catalog.base.") - - # Avoid accessor overhead since these will be - # used for every entry. - cat_ver = src_cat.version - - for t, sentry in spart.tuple_entries( - pubs=[self.prefix]): - pub, stem, ver = t - - entry = dict(six.iteritems(sentry)) - try: - npart.add(metadata=entry, - op_time=op_time, pub=pub, - stem=stem, ver=ver) - except api_errors.DuplicateCatalogEntry: - if not base: - # Don't care. - continue - - # Destination entry is in - # catalog already. - entry = npart.get_entry( - pub=pub, stem=stem, ver=ver) - - src_sigs = set( - s - for s in sentry - if s.startswith("signature-") - ) - dest_sigs = set( - s - for s in entry - if s.startswith("signature-") - ) - - if src_sigs != dest_sigs: - # Ignore any packages - # that are different - # from the first - # encountered for this - # package version. - # The client expects - # these to always be - # the same. This seems - # saner than failing. - continue - else: - if not base: - # Nothing to do. - continue - - # Destination entry is one just - # added. - entry["metadata"] = { - "sources": [], - "states": [], - } - - entry["metadata"]["sources"].append( - origin.uri) - - states = entry["metadata"]["states"] - if src_cat.version == 0: - states.append( - pkgdefs.PKG_STATE_V0) - - # Now go back and trim each entry to minimize footprint. This - # ensures each package entry only has state and source info - # recorded when needed. 
- for t, entry in ncat.tuple_entries(): - pub, stem, ver = t - mdata = entry["metadata"] - if len(mdata["sources"]) == len(opaths): - # Package is available from all origins, so - # there's no need to require which ones - # have it. - del mdata["sources"] - - if len(mdata["states"]) < len(opaths): - # At least one source is not V0, so the lazy- - # load fallback for the package metadata isn't - # needed. - del mdata["states"] - elif len(mdata["states"]) > 1: - # Ensure only one instance of state value. - mdata["states"] = [pkgdefs.PKG_STATE_V0] - if not mdata: - mdata = None - ncat.update_entry(mdata, pub=pub, stem=stem, ver=ver) - - # Finally, write out publisher catalog. - ncat.batch_mode = False - ncat.finalize() - ncat.save() - return removals - - def __convert_v0_catalog(self, v0_cat, v1_root): - """Transforms the contents of the provided version 0 Catalog - into a version 1 Catalog, replacing the current Catalog.""" + if not self.meta_root: + if self._catalog: + return self._catalog + return None + + if not self._catalog: + croot = self.catalog_root + if not os.path.isdir(croot): + # Current meta_root structure is likely in + # a state of transition, so don't provide a + # meta_root. Assume that an empty catalog + # is desired instead. (This can happen during + # an image format upgrade.) + croot = None + self._catalog = pkg.catalog.Catalog(meta_root=croot) + return self._catalog + + @property + def catalog_root(self): + """The absolute pathname of the directory containing the + Catalog data for the publisher, or None if meta_root is + not defined.""" + + if self.meta_root: + return os.path.join(self.meta_root, "catalog") + + def create_meta_root(self): + """Create the publisher's meta_root.""" + + if not self.meta_root: + raise api_errors.BadPublisherMetaRoot( + self.meta_root, operation="create_meta_root" + ) - v0_lm = v0_cat.last_modified() - if v0_lm: - # last_modified can be none if the catalog is empty. - v0_lm = pkg.catalog.ts_to_datetime(v0_lm) - - # There's no point in signing this catalog since it's simply - # a transformation of a v0 catalog. - v1_cat = pkg.catalog.Catalog(batch_mode=True, - meta_root=v1_root, sign=False) - - # A check for a previous non-zero package count is made to - # determine whether the last_modified date alone can be - # relied on. This works around some oddities with empty - # v0 catalogs. + for path in (self.meta_root, self.catalog_root): + try: + os.makedirs(path) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + elif e.errno != errno.EEXIST: + # If the path already exists, move on. + # Otherwise, raise the exception. + raise + # Optional roots not needed for all operations. + for path in ( + self.cert_root, + self.__origin_root, + self.__subj_root, + self.__crl_root, + ): + try: + os.makedirs(path) + except EnvironmentError as e: + if e.errno in (errno.EACCES, errno.EROFS): + pass + elif e.errno != errno.EEXIST: + # If the path already exists, move on. + # Otherwise, raise the exception. + raise + + def get_origin_sets(self): + """Returns a list of Repository objects representing the unique + groups of origins available. Each group is based on the origins + that share identical package catalog data.""" + + if not self.repository or not self.repository.origins: + # Guard against failure for publishers with no + # transport information. 
+ return []
+
+ if not self.meta_root or not os.path.exists(self.__origin_root):
+ # No way to identify unique sets.
+ return [self.repository]
+
+ # Index origins by tuple of (catalog creation, catalog modified)
+ osets = collections.defaultdict(list)
+
+ for origin, opath in self.__gen_origin_paths():
+ cat = pkg.catalog.Catalog(meta_root=opath, read_only=True)
+ if not cat.exists:
+ key = None
+ else:
+ key = (str(cat.created), str(cat.last_modified))
+ osets[key].append(origin)
+
+ # Now return a list of Repository objects (copies of the
+ # currently selected one) assigning each set of origins.
+ # Sort by index to ensure consistent ordering.
+ rval = []
+ for k in sorted(osets):
+ nrepo = copy.copy(self.repository)
+ nrepo.origins = osets[k]
+ rval.append(nrepo)
+
+ return rval
+
+ def has_configuration(self):
+ """Returns whether this publisher has any configuration which
+ should prevent its removal."""
+
+ return bool(
+ self.__repository.origins
+ or self.__repository.mirrors
+ or self.__sig_policy
+ or self.approved_ca_certs
+ or self.revoked_ca_certs
+ )
+
+ @property
+ def needs_refresh(self):
+ """A boolean value indicating whether the publisher's
+ metadata for the currently selected repository needs to be
+ refreshed."""
+
+ if not self.repository or not self.meta_root:
+ # Nowhere to obtain metadata from; this should rarely
+ # occur except during publisher initialization.
+ return False
+
+ lc = self.last_refreshed
+ if not lc:
+ # There is no record of when the publisher metadata was
+ # last refreshed, so assume it should be refreshed now.
+ return True
+
+ ts_now = time.time()
+ ts_last = calendar.timegm(lc.utctimetuple())
+
+ rs = self.repository.refresh_seconds
+ if not rs:
+ # There is no indicator of how often publisher
+ # metadata should be refreshed, so assume it should be
+ # now.
+ return True
+
+ if (ts_now - ts_last) >= rs:
+ # The number of seconds that has elapsed since the
+ # publisher metadata was last refreshed exceeds or
+ # equals the specified interval.
+ return True
+ return False
+
+ def __get_origin_path(self, origin):
+ if not os.path.exists(self.__origin_root):
+ return
+ # A digest of the URI string is used here to attempt to avoid
+ # path length problems. In order for this image to interoperate
+ # with older clients, we must use sha-1 here.
+ return os.path.join(
+ self.__origin_root,
+ hashlib.sha1(misc.force_bytes(origin.uri)).hexdigest(),
+ )
+
+ def __gen_origin_paths(self):
+ if not os.path.exists(self.__origin_root):
+ return
+ for origin in self.repository.origins:
+ if not origin.disabled:
+ yield origin, self.__get_origin_path(origin)
+
+ def __rebuild_catalog(self):
+ """Private helper function that builds publisher catalog based
+ on catalog from each origin."""
+
+ # First, remove catalogs for any origins that no longer exist or
+ # are disabled.
+ # We must interoperate with older clients, so force the use of
+ # sha-1 here.
+ ohashes = [
+ hashlib.sha1(misc.force_bytes(o.uri)).hexdigest()
+ for o in self.repository.origins
+ if not o.disabled
+ ]
+
+ removals = False
+ for entry in os.listdir(self.__origin_root):
+ opath = os.path.join(self.__origin_root, entry)
+ try:
+ if entry in ohashes:
+ continue
+ except Exception:
+ # Discard anything that isn't an origin.
+ pass
+
+ # An origin was removed or disabled, so publisher should
+ # inform image to force image catalog rebuild.
+ removals = True
+
+ # Not an origin or origin no longer exists; either way,
+ # it shouldn't exist here.
+ try: + if os.path.isdir(opath): + shutil.rmtree(opath) + else: + portable.remove(opath) + except EnvironmentError as e: + raise api_errors._convert_error(e) + + # if the catalog already exists on disk, is empty, and if + # no origins are configured or all origins are disabled, we're + # done. + if ( + self.catalog.exists + and self.catalog.package_count == 0 + and ( + not self.repository.origins + or all(o.disabled for o in self.repository.origins) + ) + ): + return removals + + # Discard existing catalog. + self.catalog.destroy() + self._catalog = None + + # Ensure all old catalog files are removed. + for entry in os.listdir(self.catalog_root): + if ( + entry == "attrs" + or entry == "catalog" + or entry.startswith("catalog.") + ): try: - # Could be 'None' - n0_pkgs = int(v0_cat.npkgs()) - except (TypeError, ValueError): - n0_pkgs = 0 - - if v1_cat.exists and n0_pkgs != v1_cat.package_version_count: - if v0_lm == v1_cat.last_modified: - # Already converted. - return - # Simply rebuild the entire v1 catalog every time, this - # avoids many of the problems that could happen due to - # deficiencies in the v0 implementation. - v1_cat.destroy() - self._catalog = None - v1_cat = pkg.catalog.Catalog(meta_root=v1_root, - sign=False) - - # Now populate the v1 Catalog with the v0 Catalog's data. - for f in v0_cat.fmris(): - v1_cat.add_package(f) - - # Normally, the Catalog's attributes are automatically - # populated as a result of catalog operations. But in - # this case, we want the v1 Catalog's attributes to - # match those of the v0 catalog. - v1_cat.last_modified = v0_lm - - # While this is a v1 catalog format-wise, v0 data is stored. - # This allows consumers to be aware that certain data won't be - # available in this catalog (such as dependencies, etc.). - v1_cat.version = 0 - - # Finally, save the new Catalog, and replace the old in-memory - # catalog. - v1_cat.batch_mode = False - v1_cat.finalize() - v1_cat.save() - - def __refresh_v0(self, croot, full_refresh, immediate, repo): - """The method to refresh the publisher's metadata against - a catalog/0 source. If the more recent catalog/1 version - isn't supported, this routine gets invoked as a fallback. - Returns a tuple of (changed, refreshed) where 'changed' - indicates whether new catalog data was found and 'refreshed' - indicates that catalog data was actually retrieved to determine - if there were any updates.""" - - if full_refresh: - immediate = True - - # Catalog needs v0 -> v1 transformation if repository only - # offers v0 catalog. - v0_cat = old_catalog.ServerCatalog(croot, read_only=True, - publisher=self.prefix) - - new_cat = True - v0_lm = None - if v0_cat.exists: - repo = self.repository - if full_refresh or v0_cat.origin() not in repo.origins: - try: - v0_cat.destroy(root=croot) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - raise - immediate = True - else: - new_cat = False - v0_lm = v0_cat.last_modified() - - if not immediate and not self.needs_refresh: - # No refresh needed. - return False, False - - import pkg.updatelog as old_ulog + portable.remove(os.path.join(self.catalog_root, entry)) + except EnvironmentError as e: + raise apx._convert_error(e) + + # If there's only one origin, then just symlink its catalog + # files into place. + # Symlinking includes updates for publication tools. 
+ opaths = [entry for entry in self.__gen_origin_paths()] + if len(opaths) == 1: + opath = opaths[0][1] + for fname in os.listdir(opath): + if fname.startswith("catalog.") or fname.startswith("update."): + src = os.path.join(opath, fname) + dest = os.path.join(self.catalog_root, fname) + os.symlink(misc.relpath(src, self.catalog_root), dest) + return removals + + # If there's more than one origin, then create a new catalog + # based on a composite of the catalogs for all origins. + ncat = pkg.catalog.Catalog( + batch_mode=True, meta_root=self.catalog_root, sign=False + ) + + # Mark all operations as occurring at this time. + op_time = dt.datetime.utcnow() + + for origin, opath in opaths: + src_cat = pkg.catalog.Catalog(meta_root=opath, read_only=True) + for name in src_cat.parts: + spart = src_cat.get_part(name, must_exist=True) + if spart is None: + # Client hasn't retrieved this part. + continue + + npart = ncat.get_part(name) + base = name.startswith("catalog.base.") + + # Avoid accessor overhead since these will be + # used for every entry. + cat_ver = src_cat.version + + for t, sentry in spart.tuple_entries(pubs=[self.prefix]): + pub, stem, ver = t + + entry = dict(six.iteritems(sentry)) + try: + npart.add( + metadata=entry, + op_time=op_time, + pub=pub, + stem=stem, + ver=ver, + ) + except api_errors.DuplicateCatalogEntry: + if not base: + # Don't care. + continue + + # Destination entry is in + # catalog already. + entry = npart.get_entry(pub=pub, stem=stem, ver=ver) + + src_sigs = set( + s for s in sentry if s.startswith("signature-") + ) + dest_sigs = set( + s for s in entry if s.startswith("signature-") + ) + + if src_sigs != dest_sigs: + # Ignore any packages + # that are different + # from the first + # encountered for this + # package version. + # The client expects + # these to always be + # the same. This seems + # saner than failing. + continue + else: + if not base: + # Nothing to do. + continue + + # Destination entry is one just + # added. + entry["metadata"] = { + "sources": [], + "states": [], + } + + entry["metadata"]["sources"].append(origin.uri) + + states = entry["metadata"]["states"] + if src_cat.version == 0: + states.append(pkgdefs.PKG_STATE_V0) + + # Now go back and trim each entry to minimize footprint. This + # ensures each package entry only has state and source info + # recorded when needed. + for t, entry in ncat.tuple_entries(): + pub, stem, ver = t + mdata = entry["metadata"] + if len(mdata["sources"]) == len(opaths): + # Package is available from all origins, so + # there's no need to require which ones + # have it. + del mdata["sources"] + + if len(mdata["states"]) < len(opaths): + # At least one source is not V0, so the lazy- + # load fallback for the package metadata isn't + # needed. + del mdata["states"] + elif len(mdata["states"]) > 1: + # Ensure only one instance of state value. + mdata["states"] = [pkgdefs.PKG_STATE_V0] + if not mdata: + mdata = None + ncat.update_entry(mdata, pub=pub, stem=stem, ver=ver) + + # Finally, write out publisher catalog. + ncat.batch_mode = False + ncat.finalize() + ncat.save() + return removals + + def __convert_v0_catalog(self, v0_cat, v1_root): + """Transforms the contents of the provided version 0 Catalog + into a version 1 Catalog, replacing the current Catalog.""" + + v0_lm = v0_cat.last_modified() + if v0_lm: + # last_modified can be none if the catalog is empty. + v0_lm = pkg.catalog.ts_to_datetime(v0_lm) + + # There's no point in signing this catalog since it's simply + # a transformation of a v0 catalog. 
+ v1_cat = pkg.catalog.Catalog( + batch_mode=True, meta_root=v1_root, sign=False + ) + + # A check for a previous non-zero package count is made to + # determine whether the last_modified date alone can be + # relied on. This works around some oddities with empty + # v0 catalogs. + try: + # Could be 'None' + n0_pkgs = int(v0_cat.npkgs()) + except (TypeError, ValueError): + n0_pkgs = 0 + + if v1_cat.exists and n0_pkgs != v1_cat.package_version_count: + if v0_lm == v1_cat.last_modified: + # Already converted. + return + # Simply rebuild the entire v1 catalog every time, this + # avoids many of the problems that could happen due to + # deficiencies in the v0 implementation. + v1_cat.destroy() + self._catalog = None + v1_cat = pkg.catalog.Catalog(meta_root=v1_root, sign=False) + + # Now populate the v1 Catalog with the v0 Catalog's data. + for f in v0_cat.fmris(): + v1_cat.add_package(f) + + # Normally, the Catalog's attributes are automatically + # populated as a result of catalog operations. But in + # this case, we want the v1 Catalog's attributes to + # match those of the v0 catalog. + v1_cat.last_modified = v0_lm + + # While this is a v1 catalog format-wise, v0 data is stored. + # This allows consumers to be aware that certain data won't be + # available in this catalog (such as dependencies, etc.). + v1_cat.version = 0 + + # Finally, save the new Catalog, and replace the old in-memory + # catalog. + v1_cat.batch_mode = False + v1_cat.finalize() + v1_cat.save() + + def __refresh_v0(self, croot, full_refresh, immediate, repo): + """The method to refresh the publisher's metadata against + a catalog/0 source. If the more recent catalog/1 version + isn't supported, this routine gets invoked as a fallback. + Returns a tuple of (changed, refreshed) where 'changed' + indicates whether new catalog data was found and 'refreshed' + indicates that catalog data was actually retrieved to determine + if there were any updates.""" + + if full_refresh: + immediate = True + + # Catalog needs v0 -> v1 transformation if repository only + # offers v0 catalog. + v0_cat = old_catalog.ServerCatalog( + croot, read_only=True, publisher=self.prefix + ) + + new_cat = True + v0_lm = None + if v0_cat.exists: + repo = self.repository + if full_refresh or v0_cat.origin() not in repo.origins: try: - # Note that this currently retrieves a v0 catalog that - # has to be converted to v1 format. - self.transport.get_catalog(self, v0_lm, path=croot, - alt_repo=repo) - except old_ulog.UpdateLogException: - # If an incremental update fails, attempt a full - # catalog retrieval instead. - try: - v0_cat.destroy(root=croot) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - raise - self.transport.get_catalog(self, path=croot, - alt_repo=repo) - - v0_cat = pkg.server.catalog.ServerCatalog(croot, read_only=True, - publisher=self.prefix) - - self.__convert_v0_catalog(v0_cat, croot) - if new_cat or v0_lm != v0_cat.last_modified(): - # If the catalog was rebuilt, or the timestamp of the - # catalog changed, then an update has occurred. - return True, True - return False, True - - def __refresh_v1(self, croot, tempdir, full_refresh, immediate, - mismatched, repo, progtrack=None, include_updates=False): - """The method to refresh the publisher's metadata against - a catalog/1 source. If the more recent catalog/1 version - isn't supported, __refresh_v0 is invoked as a fallback. 
- Returns a tuple of (changed, refreshed) where 'changed' - indicates whether new catalog data was found and 'refreshed' - indicates that catalog data was actually retrieved to determine - if there were any updates.""" - - # If full_refresh is True, then redownload should be True to - # ensure a non-cached version of the catalog is retrieved. - # If full_refresh is False, but mismatched is True, then - # the retrieval requests should indicate that content should - # be revalidated before being returned. Note that this - # only applies to the catalog v1 case. - redownload = full_refresh - revalidate = not redownload and mismatched + v0_cat.destroy(root=croot) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + raise + immediate = True + else: + new_cat = False + v0_lm = v0_cat.last_modified() - v1_cat = pkg.catalog.Catalog(meta_root=croot) - try: - self.transport.get_catalog1(self, ["catalog.attrs"], - path=tempdir, redownload=redownload, - revalidate=revalidate, alt_repo=repo, - progtrack=progtrack) - except api_errors.UnsupportedRepositoryOperation: - # No v1 catalogs available. - if v1_cat.exists: - # Ensure v1 -> v0 transition works right. - v1_cat.destroy() - self._catalog = None - return self.__refresh_v0(croot, full_refresh, immediate, - repo) - - # If a v0 catalog is present, remove it before proceeding to - # ensure transitions between catalog versions work correctly. - v0_cat = old_catalog.ServerCatalog(croot, read_only=True, - publisher=self.prefix) - if v0_cat.exists: - v0_cat.destroy(root=croot) - - # If above succeeded, we now have a catalog.attrs file. Parse - # this to determine what other constituent parts need to be - # downloaded. - flist = [] - if not full_refresh and v1_cat.exists: - flist = v1_cat.get_updates_needed(tempdir) - if flist == None: - return False, True - else: - attrs = pkg.catalog.CatalogAttrs(meta_root=tempdir) - for name in attrs.parts: - locale = name.split(".", 2)[2] - # XXX Skip parts that aren't in the C locale for - # now. - if locale != "C": - continue - flist.append(name) - if include_updates: - for update in attrs.updates: - flist.append(update) - - if flist: - # More catalog files to retrieve. - try: - self.transport.get_catalog1(self, flist, - path=tempdir, redownload=redownload, - revalidate=revalidate, alt_repo=repo, - progtrack=progtrack) - except api_errors.UnsupportedRepositoryOperation: - # Couldn't find a v1 catalog after getting one - # before. This would be a bizzare error, but we - # can try for a v0 catalog anyway. - return self.__refresh_v0(croot, full_refresh, - immediate, repo) - - # Clear _catalog, so we'll read in the new catalog. + if not immediate and not self.needs_refresh: + # No refresh needed. + return False, False + + import pkg.updatelog as old_ulog + + try: + # Note that this currently retrieves a v0 catalog that + # has to be converted to v1 format. + self.transport.get_catalog(self, v0_lm, path=croot, alt_repo=repo) + except old_ulog.UpdateLogException: + # If an incremental update fails, attempt a full + # catalog retrieval instead. 
+ try:
+ v0_cat.destroy(root=croot)
+ except EnvironmentError as e:
+ if e.errno == errno.EACCES:
+ raise api_errors.PermissionsException(e.filename)
+ if e.errno == errno.EROFS:
+ raise api_errors.ReadOnlyFileSystemException(e.filename)
+ raise
+ self.transport.get_catalog(self, path=croot, alt_repo=repo)
+
+ v0_cat = pkg.server.catalog.ServerCatalog(
+ croot, read_only=True, publisher=self.prefix
+ )
+
+ self.__convert_v0_catalog(v0_cat, croot)
+ if new_cat or v0_lm != v0_cat.last_modified():
+ # If the catalog was rebuilt, or the timestamp of the
+ # catalog changed, then an update has occurred.
+ return True, True
+ return False, True
+
+ def __refresh_v1(
+ self,
+ croot,
+ tempdir,
+ full_refresh,
+ immediate,
+ mismatched,
+ repo,
+ progtrack=None,
+ include_updates=False,
+ ):
+ """The method to refresh the publisher's metadata against
+ a catalog/1 source. If the more recent catalog/1 version
+ isn't supported, __refresh_v0 is invoked as a fallback.
+ Returns a tuple of (changed, refreshed) where 'changed'
+ indicates whether new catalog data was found and 'refreshed'
+ indicates that catalog data was actually retrieved to determine
+ if there were any updates."""
+
+ # If full_refresh is True, then redownload should be True to
+ # ensure a non-cached version of the catalog is retrieved.
+ # If full_refresh is False, but mismatched is True, then
+ # the retrieval requests should indicate that content should
+ # be revalidated before being returned. Note that this
+ # only applies to the catalog v1 case.
+ redownload = full_refresh
+ revalidate = not redownload and mismatched
+
+ v1_cat = pkg.catalog.Catalog(meta_root=croot)
+ try:
+ self.transport.get_catalog1(
+ self,
+ ["catalog.attrs"],
+ path=tempdir,
+ redownload=redownload,
+ revalidate=revalidate,
+ alt_repo=repo,
+ progtrack=progtrack,
+ )
+ except api_errors.UnsupportedRepositoryOperation:
+ # No v1 catalogs available.
+ if v1_cat.exists:
+ # Ensure v1 -> v0 transition works right.
+ v1_cat.destroy()
+ self._catalog = None
+ return self.__refresh_v0(croot, full_refresh, immediate, repo)
+
+ # If a v0 catalog is present, remove it before proceeding to
+ # ensure transitions between catalog versions work correctly.
+ v0_cat = old_catalog.ServerCatalog(
+ croot, read_only=True, publisher=self.prefix
+ )
+ if v0_cat.exists:
+ v0_cat.destroy(root=croot)
+
+ # If above succeeded, we now have a catalog.attrs file. Parse
+ # this to determine what other constituent parts need to be
+ # downloaded.
+ flist = []
+ if not full_refresh and v1_cat.exists:
+ flist = v1_cat.get_updates_needed(tempdir)
+ if flist == None:
+ return False, True
+ else:
+ attrs = pkg.catalog.CatalogAttrs(meta_root=tempdir)
+ for name in attrs.parts:
+ locale = name.split(".", 2)[2]
+ # XXX Skip parts that aren't in the C locale for
+ # now.
+ if locale != "C":
+ continue
+ flist.append(name)
+ if include_updates:
+ for update in attrs.updates:
+ flist.append(update)
+
+ if flist:
+ # More catalog files to retrieve.
+ try:
+ self.transport.get_catalog1(
+ self,
+ flist,
+ path=tempdir,
+ redownload=redownload,
+ revalidate=revalidate,
+ alt_repo=repo,
+ progtrack=progtrack,
+ )
+ except api_errors.UnsupportedRepositoryOperation:
+ # Couldn't find a v1 catalog after getting one
+ # before. This would be a bizarre error, but we
+ # can try for a v0 catalog anyway.
+ return self.__refresh_v0(croot, full_refresh, immediate, repo)
+
+ # Clear _catalog, so we'll read in the new catalog.
+ self._catalog = None + v1_cat = pkg.catalog.Catalog(meta_root=croot) + + # At this point the client should have a set of the constituent + # pieces that are necessary to construct a catalog. If a + # catalog already exists, call apply_updates. Otherwise, + # move the files to the appropriate location. + validate = False + if not full_refresh and v1_cat.exists: + v1_cat.apply_updates(tempdir) + else: + if v1_cat.exists: + # This is a full refresh. Destroy + # the existing catalog. + v1_cat.destroy() + + for fn in os.listdir(tempdir): + srcpath = os.path.join(tempdir, fn) + dstpath = os.path.join(croot, fn) + pkg.portable.rename(srcpath, dstpath) + + # Apply_updates validates the newly constructed catalog. + # If refresh didn't call apply_updates, arrange to + # have the new catalog validated. + validate = True + + if validate: + try: v1_cat = pkg.catalog.Catalog(meta_root=croot) + v1_cat.validate() + except api_errors.BadCatalogSignatures: + # If signature validation fails here, that means + # that the attributes and individual parts were + # self-consistent and not corrupt, but that the + # attributes and parts didn't match. This could + # be the result of a broken source providing + # an attributes file that is much older or newer + # than the catalog parts being provided. + v1_cat.destroy() + raise api_errors.MismatchedCatalog(self.prefix) + return True, True + + def __refresh_origin( + self, + croot, + full_refresh, + immediate, + mismatched, + origin, + progtrack=None, + include_updates=False, + ): + """Private helper method used to refresh catalog data for each + origin. Returns a tuple of (changed, refreshed) where 'changed' + indicates whether new catalog data was found and 'refreshed' + indicates that catalog data was actually retrieved to determine + if there were any updates.""" + + # Create a copy of the current repository object that only + # contains the origin specified. + repo = copy.copy(self.repository) + repo.origins = [origin] + + # Create temporary directory for assembly of catalog pieces. + try: + misc.makedirs(croot) + tempdir = tempfile.mkdtemp(dir=croot) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + raise + + # Make a test contact to the repo to see if it is responding. + # We need to pass in a publisher object which only has one + # origin so create one from our current publisher. + test_pub = copy.copy(self) + test_pub.repository = repo + self.transport.version_check(test_pub) + + # Ensure that the temporary directory gets removed regardless + # of success or failure. + try: + rval = self.__refresh_v1( + croot, + tempdir, + full_refresh, + immediate, + mismatched, + repo, + progtrack=progtrack, + include_updates=include_updates, + ) - # At this point the client should have a set of the constituent - # pieces that are necessary to construct a catalog. If a - # catalog already exists, call apply_updates. Otherwise, - # move the files to the appropriate location. - validate = False - if not full_refresh and v1_cat.exists: - v1_cat.apply_updates(tempdir) - else: - if v1_cat.exists: - # This is a full refresh. Destroy - # the existing catalog. - v1_cat.destroy() - - for fn in os.listdir(tempdir): - srcpath = os.path.join(tempdir, fn) - dstpath = os.path.join(croot, fn) - pkg.portable.rename(srcpath, dstpath) - - # Apply_updates validates the newly constructed catalog. 
- # If refresh didn't call apply_updates, arrange to - # have the new catalog validated. - validate = True - - if validate: - try: - v1_cat = pkg.catalog.Catalog(meta_root=croot) - v1_cat.validate() - except api_errors.BadCatalogSignatures: - # If signature validation fails here, that means - # that the attributes and individual parts were - # self-consistent and not corrupt, but that the - # attributes and parts didn't match. This could - # be the result of a broken source providing - # an attributes file that is much older or newer - # than the catalog parts being provided. - v1_cat.destroy() - raise api_errors.MismatchedCatalog(self.prefix) - return True, True - - def __refresh_origin(self, croot, full_refresh, immediate, mismatched, - origin, progtrack=None, include_updates=False): - """Private helper method used to refresh catalog data for each - origin. Returns a tuple of (changed, refreshed) where 'changed' - indicates whether new catalog data was found and 'refreshed' - indicates that catalog data was actually retrieved to determine - if there were any updates.""" - - # Create a copy of the current repository object that only - # contains the origin specified. - repo = copy.copy(self.repository) - repo.origins = [origin] - - # Create temporary directory for assembly of catalog pieces. - try: - misc.makedirs(croot) - tempdir = tempfile.mkdtemp(dir=croot) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - raise - - # Make a test contact to the repo to see if it is responding. - # We need to pass in a publisher object which only has one - # origin so create one from our current publisher. - test_pub = copy.copy(self) - test_pub.repository = repo - self.transport.version_check(test_pub) + # Perform publisher metadata sanity checks. + self.__validate_metadata(croot, repo) + + return rval + finally: + # Cleanup tempdir. + shutil.rmtree(tempdir, True) + + def __refresh( + self, + full_refresh, + immediate, + mismatched=False, + progtrack=None, + include_updates=False, + ignore_errors=False, + ): + """The method to handle the overall refresh process. It + determines if a refresh is actually needed, and then calls + the first version-specific refresh method in the chain.""" + + assert self.transport + + if full_refresh: + immediate = True + + for origin, opath in self.__gen_origin_paths(): + misc.makedirs(opath) + cat = pkg.catalog.Catalog(meta_root=opath, read_only=True) + if not cat.exists: + # If a catalog hasn't been retrieved for + # any of the origins, then a refresh is + # needed now. + immediate = True + break + + # Ensure consistent directory structure. + self.create_meta_root() + + # Check if we already have a v1 catalog on disk. + if not full_refresh and self.catalog.exists: + # If catalog is on disk, check if refresh is necessary. + if not immediate and not self.needs_refresh: + # No refresh needed. 
+ return False, None
+
+ any_changed = False
+ any_refreshed = False
+ failed = []
+ total = 0
+ for origin, opath in self.__gen_origin_paths():
+ total += 1
+ try:
+ changed, refreshed = self.__refresh_origin(
+ opath,
+ full_refresh,
+ immediate,
+ mismatched,
+ origin,
+ progtrack=progtrack,
+ include_updates=include_updates,
+ )
+ except api_errors.InvalidDepotResponseException as e:
+ failed.append((origin, e))
+ else:
+ if changed:
+ any_changed = True
+ if refreshed:
+ any_refreshed = True
+
+ if any_refreshed:
+ # Update refresh time.
+ self.last_refreshed = dt.datetime.utcnow()
+
+ # Finally, build a new catalog for this publisher based on a
+ # composite of the catalogs from all origins.
+ if self.__rebuild_catalog():
+ any_changed = True
+
+ errors = None
+ if failed:
+ errors = api_errors.CatalogOriginRefreshException(failed, total)
+
+ return any_changed, errors
+
+ def refresh(
+ self,
+ full_refresh=False,
+ immediate=False,
+ progtrack=None,
+ include_updates=False,
+ ):
+ """Refreshes the publisher's metadata, returning a tuple
+ containing a boolean value indicating whether any updates to the
+ publisher's metadata occurred and an error object, which is
+ either a CatalogOriginRefreshException containing all the failed
+ origins for this publisher or None.
+
+ 'full_refresh' is an optional boolean value indicating whether
+ a full retrieval of publisher metadata (e.g. catalogs) or only
+ an update to the existing metadata should be performed. When
+ True, 'immediate' is also set to True.
+
+ 'immediate' is an optional boolean value indicating whether
+ a refresh should occur now. If False, a publisher's selected
+ repository will be checked for updates only if needs_refresh
+ is True.
+
+ 'include_updates' is an optional boolean value indicating
+ whether all catalog updates should be retrieved additionally to
+ the catalog."""
+
+ try:
+ return self.__refresh(
+ full_refresh,
+ immediate,
+ progtrack=progtrack,
+ include_updates=include_updates,
+ )
+ except (
+ api_errors.BadCatalogUpdateIdentity,
+ api_errors.DuplicateCatalogEntry,
+ api_errors.ObsoleteCatalogUpdate,
+ api_errors.UnknownUpdateType,
+ ):
+ if full_refresh:
+ # Completely unexpected failure.
+ # These exceptions should never
+ # be raised for a full refresh
+ # case anyway, so the error should
+ # definitely be raised.
+ raise
+
+ # The incremental update likely failed for one or
+ # more of the following reasons:
+ #
+ # * The origin for the publisher has changed.
+ #
+ # * The catalog that the publisher is offering
+ # is now completely different (due to a restore
+ # from backup or --rebuild possibly).
+ #
+ # * The catalog that the publisher is offering
+ # has been restored to an older version, and
+ # packages that already exist in this client's
+ # copy of the catalog have been re-added.
+ #
+ # * The type of incremental update operation
+ # that was performed on the catalog isn't supported
+ # by this version of the client, so a full retrieval
+ # is required.
+ #
+ return self.__refresh(True, True, progtrack=progtrack)
+ except api_errors.MismatchedCatalog:
+ if full_refresh:
+ # If this was a full refresh, don't bother
+ # retrying as it implies that the content
+ # retrieved wasn't cached.
+ raise
+
+ # Retrieval of the catalog attributes and/or parts was
+ # successful, but the identity (digest or other
+ # information) didn't match the catalog attributes.
+ # This could be the result of a misbehaving or stale
+ # cache.
+ return self.__refresh( + False, True, mismatched=True, progtrack=progtrack + ) + except (api_errors.BadCatalogSignatures, api_errors.InvalidCatalogFile): + # Assembly of the catalog failed, but this could be due + # to a transient error. So, retry at least once more. + return self.__refresh(True, True, progtrack=progtrack) + except (api_errors.BadCatalogSignatures, api_errors.InvalidCatalogFile): + # Assembly of the catalog failed, but this could be due + # to a transient error. So, retry at least once more. + return self.__refresh(True, True, progtrack=progtrack) + + def remove_meta_root(self): + """Removes the publisher's meta_root.""" + + if not self.meta_root: + raise api_errors.BadPublisherMetaRoot( + self.meta_root, operation="remove_meta_root" + ) - # Ensure that the temporary directory gets removed regardless - # of success or failure. - try: - rval = self.__refresh_v1(croot, tempdir, - full_refresh, immediate, mismatched, repo, - progtrack=progtrack, - include_updates=include_updates) - - # Perform publisher metadata sanity checks. - self.__validate_metadata(croot, repo) - - return rval - finally: - # Cleanup tempdir. - shutil.rmtree(tempdir, True) - - def __refresh(self, full_refresh, immediate, mismatched=False, - progtrack=None, include_updates=False, ignore_errors=False): - """The method to handle the overall refresh process. It - determines if a refresh is actually needed, and then calls - the first version-specific refresh method in the chain.""" - - assert self.transport - - if full_refresh: - immediate = True - - for origin, opath in self.__gen_origin_paths(): - misc.makedirs(opath) - cat = pkg.catalog.Catalog(meta_root=opath, - read_only=True) - if not cat.exists: - # If a catalog hasn't been retrieved for - # any of the origins, then a refresh is - # needed now. - immediate = True - break - - # Ensure consistent directory structure. - self.create_meta_root() + try: + shutil.rmtree(self.meta_root) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + if e.errno not in (errno.ENOENT, errno.ESRCH): + raise + + def reset_client_uuid(self): + """Replaces the current client_uuid with a new UUID.""" + + self.__client_uuid = str(uuid.uuid1()) + self.__client_uuid_time = dt.datetime.utcnow().ctime() + + def validate_config(self, repo_uri=None): + """Verify that the publisher's configuration (such as prefix) + matches that provided by the repository. If the configuration + does not match as expected, an UnknownRepositoryPublishers + exception will be raised. + + 'repo_uri' is an optional RepositoryURI object or URI string + containing the location of the repository. If not provided, + the publisher's repository will be used instead.""" + + if repo_uri and not isinstance(repo_uri, RepositoryURI): + repo = RepositoryURI(repo_uri) + elif not repo_uri: + # Transport actually allows both type of objects. + repo = self + else: + repo = repo_uri + + pubs = None + try: + pubs = self.transport.get_publisherdata(repo) + except ( + api_errors.TransportError, + api_errors.UnsupportedRepositoryOperation, + ): + # Nothing more can be done (because the target origin + # can't be contacted, or because it doesn't support + # retrieval of publisher configuration data). + return + + if not pubs: + raise api_errors.RepoPubConfigUnavailable( + location=repo_uri, pub=self + ) - # Check if we already have a v1 catalog on disk. 
- if not full_refresh and self.catalog.exists: - # If catalog is on disk, check if refresh is necessary. - if not immediate and not self.needs_refresh: - # No refresh needed. - return False, None - - any_changed = False - any_refreshed = False - failed = [] - total = 0 - for origin, opath in self.__gen_origin_paths(): - total += 1 - try: - changed, refreshed = self.__refresh_origin( - opath, full_refresh, immediate, mismatched, - origin, progtrack=progtrack, - include_updates=include_updates) - except api_errors.InvalidDepotResponseException as e: - failed.append((origin, e)) - else: - if changed: - any_changed = True - if refreshed: - any_refreshed = True - - if any_refreshed: - # Update refresh time. - self.last_refreshed = dt.datetime.utcnow() - - # Finally, build a new catalog for this publisher based on a - # composite of the catalogs from all origins. - if self.__rebuild_catalog(): - any_changed = True - - errors = None - if failed: - errors = api_errors.CatalogOriginRefreshException( - failed, total) - - return any_changed, errors - - def refresh(self, full_refresh=False, immediate=False, progtrack=None, - include_updates=False): - """Refreshes the publisher's metadata, returning a tuple - containing a boolean value indicating whether any updates to the - publisher's metadata occurred and an error object, which is - either a CatalogOriginRefreshException containing all the failed - origins for this publisher or None. - - 'full_refresh' is an optional boolean value indicating whether - a full retrieval of publisher metadata (e.g. catalogs) or only - an update to the existing metadata should be performed. When - True, 'immediate' is also set to True. - - 'immediate' is an optional boolean value indicating whether - a refresh should occur now. If False, a publisher's selected - repository will be checked for updates only if needs_refresh - is True. - - 'include_updates' is an optional boolean value indicating - whether all catalog updates should be retrieved additionally to - the catalog.""" + if self.prefix not in pubs: + known = [p.prefix for p in pubs] + if repo_uri: + raise api_errors.UnknownRepositoryPublishers( + known=known, unknown=[self.prefix], location=repo_uri + ) + raise api_errors.UnknownRepositoryPublishers( + known=known, + unknown=[self.prefix], + origins=self.repository.origins, + ) - try: - return self.__refresh(full_refresh, immediate, - progtrack=progtrack, - include_updates=include_updates) - except (api_errors.BadCatalogUpdateIdentity, - api_errors.DuplicateCatalogEntry, - api_errors.ObsoleteCatalogUpdate, - api_errors.UnknownUpdateType): - if full_refresh: - # Completely unexpected failure. - # These exceptions should never - # be raised for a full refresh - # case anyway, so the error should - # definitely be raised. - raise - - # The incremental update likely failed for one or - # more of the following reasons: - # - # * The origin for the publisher has changed. - # - # * The catalog that the publisher is offering - # is now completely different (due to a restore - # from backup or --rebuild possibly). - # - # * The catalog that the publisher is offering - # has been restored to an older version, and - # packages that already exist in this client's - # copy of the catalog have been re-addded. - # - # * The type of incremental update operation that - # that was performed on the catalog isn't supported - # by this version of the client, so a full retrieval - # is required. 
- # - return self.__refresh(True, True, progtrack=progtrack) - except api_errors.MismatchedCatalog: - if full_refresh: - # If this was a full refresh, don't bother - # retrying as it implies that the content - # retrieved wasn't cached. - raise - - # Retrieval of the catalog attributes and/or parts was - # successful, but the identity (digest or other - # information) didn't match the catalog attributes. - # This could be the result of a misbehaving or stale - # cache. - return self.__refresh(False, True, mismatched=True, - progtrack=progtrack) - except (api_errors.BadCatalogSignatures, - api_errors.InvalidCatalogFile): - # Assembly of the catalog failed, but this could be due - # to a transient error. So, retry at least once more. - return self.__refresh(True, True, progtrack=progtrack) - except (api_errors.BadCatalogSignatures, - api_errors.InvalidCatalogFile): - # Assembly of the catalog failed, but this could be due - # to a transient error. So, retry at least once more. - return self.__refresh(True, True, progtrack=progtrack) - - def remove_meta_root(self): - """Removes the publisher's meta_root.""" - - if not self.meta_root: - raise api_errors.BadPublisherMetaRoot(self.meta_root, - operation="remove_meta_root") + def approve_ca_cert(self, cert): + """Add the cert as a CA for manifest signing for this publisher. + + The 'cert' parameter is a string of the certificate to add. + """ + + cert = self.__string_to_cert(cert) + hsh = self.__add_cert(cert) + # If the user had previously revoked this certificate, remove + # the certificate from that list. + if hsh in self.revoked_ca_certs: + t = set(self.revoked_ca_certs) + t.remove(hsh) + self.revoked_ca_certs = list(t) + self.approved_ca_certs.append(hsh) + + def revoke_ca_cert(self, s): + """Record that the cert with hash 's' is no longer trusted + as a CA. This method currently assumes it's only invoked as + a result of user action.""" + + self.revoked_ca_certs.append(s) + self.revoked_ca_certs = list(set(self.revoked_ca_certs)) + if s in self.approved_ca_certs: + t = set(self.approved_ca_certs) + t.remove(s) + self.approved_ca_certs = list(t) + + def unset_ca_cert(self, s): + """If the cert with hash 's' has been added or removed by the + user, undo the add or removal.""" + + if s in self.approved_ca_certs: + t = set(self.approved_ca_certs) + t.remove(s) + self.approved_ca_certs = list(t) + if s in self.revoked_ca_certs: + t = set(self.revoked_ca_certs) + t.remove(s) + self.revoked_ca_certs = list(t) + + @staticmethod + def __hash_cert(c): + # In order to interoperate with older images, we must use SHA-1 + # here. + return hashlib.sha1( + c.public_bytes(serialization.Encoding.PEM) + ).hexdigest() + + @staticmethod + def __string_to_cert(s, pkg_hash=None): + """Convert a string to a X509 cert.""" + + try: + return x509.load_pem_x509_certificate( + misc.force_bytes(s), default_backend() + ) + except ValueError: + if pkg_hash is not None: + raise api_errors.BadFileFormat( + _( + "The file " + "with hash {0} was expected to be a PEM " + "certificate but it could not be " + "read." 
+ ).format(pkg_hash) + ) + raise api_errors.BadFileFormat( + _( + "The following string " + "was expected to be a PEM certificate, but it " + "could not be parsed as such:\n{0}".format(s) + ) + ) + def __add_cert(self, cert, pkg_hash=None): + """Add the pem representation of the certificate 'cert' to the + certificates this publisher knows about.""" + + self.create_meta_root() + if not pkg_hash: + pkg_hash = self.__hash_cert(cert) + pkg_hash_pth = os.path.join(self.cert_root, pkg_hash) + file_problem = False + try: + with open(pkg_hash_pth, "wb") as fh: + fh.write(cert.public_bytes(serialization.Encoding.PEM)) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + file_problem = True + + # Note that while we store certs by their subject hashes, + # we use our own hashing since cryptography has no interface + # for the subject hash and other crypto frameworks have been + # inconsistent with OpenSSL. + subj_hsh = hashlib.sha1(misc.force_bytes(cert.subject)).hexdigest() + c = 0 + made_link = False + while not made_link: + fn = os.path.join(self.__subj_root, "{0}.{1}".format(subj_hsh, c)) + if os.path.exists(fn): + c += 1 + continue + if not file_problem: try: - shutil.rmtree(self.meta_root) + portable.link(pkg_hash_pth, fn) + made_link = True except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - if e.errno not in (errno.ENOENT, errno.ESRCH): - raise - - def reset_client_uuid(self): - """Replaces the current client_uuid with a new UUID.""" - - self.__client_uuid = str(uuid.uuid1()) - self.__client_uuid_time = dt.datetime.utcnow().ctime() - - def validate_config(self, repo_uri=None): - """Verify that the publisher's configuration (such as prefix) - matches that provided by the repository. If the configuration - does not match as expected, an UnknownRepositoryPublishers - exception will be raised. - - 'repo_uri' is an optional RepositoryURI object or URI string - containing the location of the repository. If not provided, - the publisher's repository will be used instead.""" - - if repo_uri and not isinstance(repo_uri, RepositoryURI): - repo = RepositoryURI(repo_uri) - elif not repo_uri: - # Transport actually allows both type of objects. - repo = self - else: - repo = repo_uri - - pubs = None + pass + if not made_link: + self.__issuers.setdefault(subj_hsh, []).append(c) + made_link = True + return pkg_hash + + def get_cert_by_hash( + self, + pkg_hash, + verify_hash=False, + only_retrieve=False, + hash_func=digest.DEFAULT_HASH_FUNC, + ): + """Given a pkg5 hash, retrieve the cert that's associated with + it. + + The 'pkg_hash' parameter contains the file hash of the + certificate to retrieve. + + The 'verify_hash' parameter determines the file that's read + from disk matches the expected hash. 
+ + The 'only_retrieve' parameter determines whether a X509 object + is built from the certificate retrieved or if the certificate + is only stored on disk.""" + + assert not (verify_hash and only_retrieve) + pth = os.path.join(self.cert_root, pkg_hash) + pth_exists = os.path.exists(pth) + if pth_exists and only_retrieve: + return None + if pth_exists: + with open(pth, "rb") as fh: + s = fh.read() + else: + s = self.transport.get_content(self, pkg_hash, hash_func=hash_func) + c = self.__string_to_cert(s, pkg_hash) + if not pth_exists: + try: + self.__add_cert(c, pkg_hash=pkg_hash) + except api_errors.PermissionsException: + pass + if only_retrieve: + return None + + if verify_hash: + h = misc.get_data_digest( + BytesIO(misc.force_bytes(s)), length=len(s), hash_func=hash_func + )[0] + if h != pkg_hash: + raise api_errors.ModifiedCertificateException(c, pth) + return c + + def __rebuild_subj_root(self): + """Rebuild subject hash metadata.""" + + # clean up the old subject hash files to prevent + # junk files residing in the directory + try: + shutil.rmtree(self.__subj_root) + except EnvironmentError: + # if unprivileged user, we can't add + # certs to it + pass + else: + for p in os.listdir(self.cert_root): + path = os.path.join(self.cert_root, p) + if not os.path.isfile(path): + continue + with open(path, "rb") as fh: + s = fh.read() + cert = self.__string_to_cert(s) + self.__add_cert(cert) + + def __get_certs_by_name(self, name): + """Given 'name', a Cryptograhy 'Name' object, return the certs + with that name as a subject.""" + + res = [] + count = 0 + name_hsh = hashlib.sha1(misc.force_bytes(name)).hexdigest() + + def load_cert(pth): + with open(pth, "rb") as f: + return x509.load_pem_x509_certificate( + f.read(), default_backend() + ) + + try: + while True: + pth = os.path.join( + self.__subj_root, "{0}.{1}".format(name_hsh, count) + ) + res.append(load_cert(pth)) + count += 1 + except EnvironmentError as e: + # When switching to a different hash algorithm, the hash + # name of file changes so that we couldn't find the + # file. We try harder to rebuild the subject's metadata + # if it's the first time we fail (count == 0). + if count == 0 and e.errno == errno.ENOENT: + self.__rebuild_subj_root() try: - pubs = self.transport.get_publisherdata(repo) - except (api_errors.TransportError, - api_errors.UnsupportedRepositoryOperation): - # Nothing more can be done (because the target origin - # can't be contacted, or because it doesn't support - # retrieval of publisher configuration data). - return - - if not pubs: - raise api_errors.RepoPubConfigUnavailable( - location=repo_uri, pub=self) - - if self.prefix not in pubs: - known = [p.prefix for p in pubs] - if repo_uri: - raise api_errors.UnknownRepositoryPublishers( - known=known, unknown=[self.prefix], - location=repo_uri) - raise api_errors.UnknownRepositoryPublishers( - known=known, unknown=[self.prefix], - origins=self.repository.origins) - - def approve_ca_cert(self, cert): - """Add the cert as a CA for manifest signing for this publisher. - - The 'cert' parameter is a string of the certificate to add. - """ - - cert = self.__string_to_cert(cert) - hsh = self.__add_cert(cert) - # If the user had previously revoked this certificate, remove - # the certificate from that list. - if hsh in self.revoked_ca_certs: - t = set(self.revoked_ca_certs) - t.remove(hsh) - self.revoked_ca_certs = list(t) - self.approved_ca_certs.append(hsh) - - def revoke_ca_cert(self, s): - """Record that the cert with hash 's' is no longer trusted - as a CA. 
This method currently assumes it's only invoked as - a result of user action.""" - - self.revoked_ca_certs.append(s) - self.revoked_ca_certs = list(set( - self.revoked_ca_certs)) - if s in self.approved_ca_certs: - t = set(self.approved_ca_certs) - t.remove(s) - self.approved_ca_certs = list(t) - - def unset_ca_cert(self, s): - """If the cert with hash 's' has been added or removed by the - user, undo the add or removal.""" - - if s in self.approved_ca_certs: - t = set(self.approved_ca_certs) - t.remove(s) - self.approved_ca_certs = list(t) - if s in self.revoked_ca_certs: - t = set(self.revoked_ca_certs) - t.remove(s) - self.revoked_ca_certs = list(t) - - @staticmethod - def __hash_cert(c): - # In order to interoperate with older images, we must use SHA-1 - # here. - return hashlib.sha1( - c.public_bytes(serialization.Encoding.PEM)).hexdigest() - - @staticmethod - def __string_to_cert(s, pkg_hash=None): - """Convert a string to a X509 cert.""" + res.append(load_cert(pth)) + except EnvironmentError as ex: + if ex.errno != errno.ENOENT: + raise + t = api_errors._convert_error(e, [errno.ENOENT]) + if t: + raise t + res.extend(self.__issuers.get(name_hsh, [])) + return res + + def get_ca_certs(self): + """Return a dictionary of the CA certificates for this + publisher.""" + + if self.ca_dict is not None: + return self.ca_dict + self.ca_dict = {} + # CA certs approved for this publisher are stored by hash to + # prevent the later substitution or confusion over what certs + # have or have not been approved. + for h in set(self.approved_ca_certs): + c = self.get_cert_by_hash(h, verify_hash=True) + s = hashlib.sha1(misc.force_bytes(c.subject)).hexdigest() + self.ca_dict.setdefault(s, []) + self.ca_dict[s].append(c) + return self.ca_dict + + def update_props( + self, + set_props=EmptyI, + add_prop_values=EmptyDict, + remove_prop_values=EmptyDict, + unset_props=EmptyI, + ): + """Update the properties set for this publisher with the ones + provided as arguments. The order of application is that any + existing properties are unset, then properties are set to their + new values, then values are added to properties, and finally + values are removed from properties.""" + + # Delay validation so that any intermittent inconsistent state + # doesn't cause problems. + self.__delay_validation = True + # Remove existing properties. + for n in unset_props: + self.properties.pop(n, None) + # Add or reset new properties. + self.properties.update(set_props) + # Add new values to properties. + for n in add_prop_values.keys(): + self.properties.setdefault(n, []) + if not isinstance(self.properties[n], list): + raise api_errors.InvalidPropertyValue( + _( + "Cannot add a value to a single valued " + "property, The property name is '{name}' " + "and the current value is '{value}'" + ).format(name=n, value=self.properties[n]) + ) + self.properties[n].extend(add_prop_values[n]) + # Remove values from properties. + for n in remove_prop_values.keys(): + if n not in self.properties: + raise api_errors.InvalidPropertyValue( + _( + "Cannot remove a value from the property " + "{name} because the property does not " + "exist." + ).format(name=n) + ) + if not isinstance(self.properties[n], list): + raise api_errors.InvalidPropertyValue( + _( + "Cannot remove a value from a single " + "valued property, unset must be used. 
The " + "property name is '{name}' and the " + "current value is '{value}'" + ).format(name=n, value=self.properties[n]) + ) + for v in remove_prop_values[n]: try: - return x509.load_pem_x509_certificate( - misc.force_bytes(s), default_backend()) + self.properties[n].remove(v) except ValueError: - if pkg_hash is not None: - raise api_errors.BadFileFormat(_("The file " - "with hash {0} was expected to be a PEM " - "certificate but it could not be " - "read.").format(pkg_hash)) - raise api_errors.BadFileFormat(_("The following string " - "was expected to be a PEM certificate, but it " - "could not be parsed as such:\n{0}".format(s))) - - def __add_cert(self, cert, pkg_hash=None): - """Add the pem representation of the certificate 'cert' to the - certificates this publisher knows about.""" + raise api_errors.InvalidPropertyValue( + _( + "Cannot remove the value {value} " + "from the property {name} " + "because the value is not in the " + "property's list." + ).format(value=v, name=n) + ) + self.__delay_validation = False + self.__validate_properties() + + def __validate_properties(self): + """Check that the properties set for this publisher are + consistent with each other.""" + + if self.__properties.get(SIGNATURE_POLICY, "") == "require-names": + if not self.__properties.get("signature-required-names", None): + raise api_errors.InvalidPropertyValue( + _( + "At least one name must be provided for " + "the signature-required-names policy." + ) + ) + + def __verify_x509_signature(self, c, key): + """Verify the signature of a certificate or CRL 'c' against a + provided public key 'key'.""" + + if isinstance(c, x509.Certificate): + data = c.tbs_certificate_bytes + elif isinstance(c, x509.CertificateRevocationList): + data = c.tbs_certlist_bytes + else: + raise AssertionError( + "Invalid x509 object for " + "signature verification: {0}".format(type(c)) + ) - self.create_meta_root() - if not pkg_hash: - pkg_hash = self.__hash_cert(cert) - pkg_hash_pth = os.path.join(self.cert_root, pkg_hash) - file_problem = False - try: - with open(pkg_hash_pth, "wb") as fh: - fh.write(cert.public_bytes( - serialization.Encoding.PEM)) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - file_problem = True - - # Note that while we store certs by their subject hashes, - # we use our own hashing since cryptography has no interface - # for the subject hash and other crypto frameworks have been - # inconsistent with OpenSSL. - subj_hsh = hashlib.sha1(misc.force_bytes( - cert.subject)).hexdigest() - c = 0 - made_link = False - while not made_link: - fn = os.path.join(self.__subj_root, - "{0}.{1}".format(subj_hsh, c)) - if os.path.exists(fn): - c += 1 - continue - if not file_problem: - try: - portable.link(pkg_hash_pth, fn) - made_link = True - except EnvironmentError as e: - pass - if not made_link: - self.__issuers.setdefault(subj_hsh, []).append( - c) - made_link = True - return pkg_hash - - def get_cert_by_hash(self, pkg_hash, verify_hash=False, - only_retrieve=False, hash_func=digest.DEFAULT_HASH_FUNC): - """Given a pkg5 hash, retrieve the cert that's associated with - it. - - The 'pkg_hash' parameter contains the file hash of the - certificate to retrieve. - - The 'verify_hash' parameter determines the file that's read - from disk matches the expected hash. - - The 'only_retrieve' parameter determines whether a X509 object - is built from the certificate retrieved or if the certificate - is only stored on disk. 
""" - - assert not (verify_hash and only_retrieve) - pth = os.path.join(self.cert_root, pkg_hash) - pth_exists = os.path.exists(pth) - if pth_exists and only_retrieve: - return None - if pth_exists: - with open(pth, "rb") as fh: - s = fh.read() - else: - s = self.transport.get_content(self, pkg_hash, - hash_func=hash_func) - c = self.__string_to_cert(s, pkg_hash) - if not pth_exists: - try: - self.__add_cert(c, pkg_hash=pkg_hash) - except api_errors.PermissionsException: - pass - if only_retrieve: - return None - - if verify_hash: - h = misc.get_data_digest(BytesIO(misc.force_bytes(s)), - length=len(s), hash_func=hash_func)[0] - if h != pkg_hash: - raise api_errors.ModifiedCertificateException(c, - pth) - return c - - def __rebuild_subj_root(self): - """Rebuild subject hash metadata.""" - - # clean up the old subject hash files to prevent - # junk files residing in the directory - try: - shutil.rmtree(self.__subj_root) - except EnvironmentError: - # if unprivileged user, we can't add - # certs to it - pass - else: - for p in os.listdir(self.cert_root): - path = os.path.join(self.cert_root, p) - if not os.path.isfile(path): - continue - with open(path, "rb") as fh: - s = fh.read() - cert = self.__string_to_cert(s) - self.__add_cert(cert) - - def __get_certs_by_name(self, name): - """Given 'name', a Cryptograhy 'Name' object, return the certs - with that name as a subject.""" - - res = [] - count = 0 - name_hsh = hashlib.sha1(misc.force_bytes(name)).hexdigest() - - def load_cert(pth): - with open(pth, "rb") as f: - return x509.load_pem_x509_certificate( - f.read(), default_backend()) + try: + key.verify( + c.signature, + data, + padding.PKCS1v15(), + c.signature_hash_algorithm, + ) + return True + except Exception: + return False - try: - while True: - pth = os.path.join(self.__subj_root, - "{0}.{1}".format(name_hsh, count)) - res.append(load_cert(pth)) - count += 1 - except EnvironmentError as e: - # When switching to a different hash algorithm, the hash - # name of file changes so that we couldn't find the - # file. We try harder to rebuild the subject's metadata - # if it's the first time we fail (count == 0). - if count == 0 and e.errno == errno.ENOENT: - self.__rebuild_subj_root() - try: - res.append(load_cert(pth)) - except EnvironmentError as ex: - if ex.errno != errno.ENOENT: - raise - - t = api_errors._convert_error(e, - [errno.ENOENT]) - if t: - raise t - res.extend(self.__issuers.get(name_hsh, [])) - return res - - def get_ca_certs(self): - """Return a dictionary of the CA certificates for this - publisher.""" - - if self.ca_dict is not None: - return self.ca_dict - self.ca_dict = {} - # CA certs approved for this publisher are stored by hash to - # prevent the later substitution or confusion over what certs - # have or have not been approved. - for h in set(self.approved_ca_certs): - c = self.get_cert_by_hash(h, verify_hash=True) - s = hashlib.sha1(misc.force_bytes( - c.subject)).hexdigest() - self.ca_dict.setdefault(s, []) - self.ca_dict[s].append(c) - return self.ca_dict - - def update_props(self, set_props=EmptyI, add_prop_values=EmptyDict, - remove_prop_values=EmptyDict, unset_props=EmptyI): - """Update the properties set for this publisher with the ones - provided as arguments. The order of application is that any - existing properties are unset, then properties are set to their - new values, then values are added to properties, and finally - values are removed from properties.""" - - # Delay validation so that any intermittent inconsistent state - # doesn't cause problems. 
- self.__delay_validation = True - # Remove existing properties. - for n in unset_props: - self.properties.pop(n, None) - # Add or reset new properties. - self.properties.update(set_props) - # Add new values to properties. - for n in add_prop_values.keys(): - self.properties.setdefault(n, []) - if not isinstance(self.properties[n], list): - raise api_errors.InvalidPropertyValue(_( - "Cannot add a value to a single valued " - "property, The property name is '{name}' " - "and the current value is '{value}'" - ).format(name=n, value=self.properties[n])) - self.properties[n].extend(add_prop_values[n]) - # Remove values from properties. - for n in remove_prop_values.keys(): - if n not in self.properties: - raise api_errors.InvalidPropertyValue(_( - "Cannot remove a value from the property " - "{name} because the property does not " - "exist.").format(name=n)) - if not isinstance(self.properties[n], list): - raise api_errors.InvalidPropertyValue(_( - "Cannot remove a value from a single " - "valued property, unset must be used. The " - "property name is '{name}' and the " - "current value is '{value}'").format( - name=n, value=self.properties[n])) - for v in remove_prop_values[n]: - try: - self.properties[n].remove(v) - except ValueError: - raise api_errors.InvalidPropertyValue(_( - "Cannot remove the value {value} " - "from the property {name} " - "because the value is not in the " - "property's list.").format( - value=v, name=n)) - self.__delay_validation = False - self.__validate_properties() - - def __validate_properties(self): - """Check that the properties set for this publisher are - consistent with each other.""" - - if self.__properties.get(SIGNATURE_POLICY, "") == \ - "require-names": - if not self.__properties.get("signature-required-names", - None): - raise api_errors.InvalidPropertyValue(_( - "At least one name must be provided for " - "the signature-required-names policy.")) - - def __verify_x509_signature(self, c, key): - """Verify the signature of a certificate or CRL 'c' against a - provided public key 'key'.""" - - if isinstance(c, x509.Certificate): - data = c.tbs_certificate_bytes - elif isinstance(c, x509.CertificateRevocationList): - data = c.tbs_certlist_bytes - else: - raise AssertionError("Invalid x509 object for " - "signature verification: {0}".format(type(c))) + def __check_crl(self, cert, ca_dict, crl_uri, more_uris=False): + """Determines whether the certificate has been revoked by the + CRL located at 'crl_uri'. - try: - key.verify(c.signature, data, padding.PKCS1v15(), - c.signature_hash_algorithm) - return True - except Exception: - return False - - def __check_crl(self, cert, ca_dict, crl_uri, more_uris=False): - """Determines whether the certificate has been revoked by the - CRL located at 'crl_uri'. - - The 'cert' parameter is the certificate to check for revocation. - - The 'ca_dict' is a dictionary which maps subject hashes to - certs treated as trust anchors.""" - - crl = None - if self.transport: - crl = self.transport.get_crl(crl_uri, self.__crl_root, - more_uris=more_uris) - - # If we couldn't retrieve a CRL from the distribution point - # and no CRL is cached on disk, assume the cert has not been - # revoked. It's possible that this should be an image or - # publisher setting in the future. - if not crl: - return True - - # A CRL has been found, now it needs to be validated like - # a certificate is. 
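# --- Editor's note: illustrative sketch only, not part of this patch. ---
# What "validated like a certificate" amounts to: the CRL's signature is
# checked against the public key of a candidate issuer, the same
# key.verify() call __verify_x509_signature makes for certificates.  The
# helper name below is hypothetical and assumes an RSA issuer key.
from cryptography.hazmat.primitives.asymmetric import padding

def crl_signed_by(crl, issuer_cert):
    """Return True if 'issuer_cert' (an x509.Certificate) signed 'crl'."""
    key = issuer_cert.public_key()
    try:
        key.verify(
            crl.signature,
            crl.tbs_certlist_bytes,
            padding.PKCS1v15(),               # RSA keys only, as in the code here
            crl.signature_hash_algorithm,
        )
        return True
    except Exception:
        return False

# The cryptography package also offers an equivalent convenience wrapper
# that works for any supported key type:
#     crl.is_signature_valid(issuer_cert.public_key())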
- verified_crl = False - crl_issuer = crl.issuer - tas = ca_dict.get(hashlib.sha1(misc.force_bytes( - crl_issuer)).hexdigest(), []) - for t in tas: - try: - if self.__verify_x509_signature(crl, - t.public_key()): - # If t isn't approved for signing crls, - # the exception __check_extensions - # raises will take the code to the - # except below. - self.__check_extensions(t, - CRL_SIGNING_USE, 0) - verified_crl = True - except api_errors.SigningException: - pass - if not verified_crl: - crl_cas = self.__get_certs_by_name(crl_issuer) - for c in crl_cas: - if self.__verify_x509_signature(crl, - c.public_key()): - try: - self.verify_chain(c, ca_dict, 0, - True, - usages=CRL_SIGNING_USE) - except api_errors.SigningException: - pass - else: - verified_crl = True - break - if not verified_crl: - return True - - # For a certificate to be revoked, its CRL must be validated - # and revoked the certificate. - - assert crl.issuer == cert.issuer - for rev in crl: - if rev.serial_number != cert.serial_number: - continue - try: - reason = rev.extensions.get_extension_for_oid( - x509.OID_CRL_REASON).value - except x509.ExtensionNotFound: - reason = None - raise api_errors.RevokedCertificate(cert, reason) - - def __check_crls(self, cert, ca_dict): - """Determines whether the certificate has been revoked by one of - its CRLs. - - The 'cert' parameter is the certificate to check for revocation. - - The 'ca_dict' is a dictionary which maps subject hashes to - certs treated as trust anchors.""" - - # If the certificate doesn't have a CRL location listed, treat - # it as valid. - - # The CRLs to be retrieved are stored in the - # CRLDistributionPoints extensions which is structured like - # this: - # - # CRLDitsributionPoints = [ - # CRLDistributionPoint = [ - # union { - # full_name = [ GeneralName, ... ] - # relative_name = [ GeneralName, ... ] - # }, ... ] - # , ... ] - # - # Relative names are a feature in X509 certs which allow to - # specify a location relative to another certificate. We are not - # supporting this and I'm not sure anybody is using this for - # CRLs. - # Full names are absolute locations but can be in different - # formats (refer to RFC5280) but in general only the URI type is - # used for CRLs. So this is the only thing we support here. + The 'cert' parameter is the certificate to check for revocation. 
- try: - dps = cert.extensions.get_extension_for_oid( - x509.oid.ExtensionOID.CRL_DISTRIBUTION_POINTS).value - except x509.ExtensionNotFound: - return - - crl_uris = [] - for dp in dps: - if not dp.full_name: - # we don't support relative names - continue - for uri in dp.full_name: - if not isinstance(uri, - x509.UniformResourceIdentifier): - # we only support URIs - continue - crl_uris.append(str(uri.value)) - - for i, uri in enumerate(crl_uris): - more_uris = i < len(crl_uris) - 1 - self.__check_crl(cert, ca_dict, uri, - more_uris=more_uris) - - def __check_revocation(self, cert, ca_dict, use_crls): - hsh = self.__hash_cert(cert) - if hsh in self.revoked_ca_certs: - raise api_errors.RevokedCertificate(cert, - "User manually revoked certificate.") - if use_crls: - self.__check_crls(cert, ca_dict) - - def __check_extensions(self, cert, usages, cur_pathlen): - """Check whether the critical extensions in this certificate - are supported and allow the provided use(s).""" + The 'ca_dict' is a dictionary which maps subject hashes to + certs treated as trust anchors.""" - try: - exts = cert.extensions - except ValueError as e: - raise api_errors.InvalidCertificateExtensions( - cert, e) - - def check_values(vs): - for v in vs: - if v in supported_vs: - continue - # If there is only one extension value, it must - # be the problematic one. Otherwise, we also - # output the first unsupported value as the - # problematic value following extension value. - if len(vs) < 2: - raise api_errors.UnsupportedExtensionValue( - cert, ext, ", ".join(vs)) - raise api_errors.UnsupportedExtensionValue( - cert, ext, ", ".join(vs), v) - - for ext in exts: - etype = type(ext.value) - if etype in SUPPORTED_EXTENSION_VALUES: - supported_vs = SUPPORTED_EXTENSION_VALUES[etype] - keys = EXTENSIONS_VALUES[etype] - if etype == x509.BasicConstraints: - pathlen = ext.value.path_length - if pathlen is not None and \ - cur_pathlen > pathlen: - raise api_errors.PathlenTooShort(cert, - cur_pathlen, pathlen) - elif etype == x509.KeyUsage: - keys = list(EXTENSIONS_VALUES[etype]) - if not getattr(ext.value, - "key_agreement"): - # Cryptography error: - # encipher_only/decipher_only is - # undefined unless key_agreement - # is true - keys.remove("encipher_only") - keys.remove("decipher_only") - vs = [ - key - for key in keys - if getattr(ext.value, key) - ] - # Check whether the values for the extension are - # recognized. - check_values(vs) - # For each use, check to see whether it's - # permitted by the certificate's extension - # values. - if etype not in usages: - continue - for u in usages[etype]: - if u not in vs: - raise api_errors.InappropriateCertificateUse( - cert, ext, u, ", ".join(vs)) - # If the extension name is unrecognized and critical, - # then the chain cannot be verified. - elif ext.critical: - raise api_errors.UnsupportedCriticalExtension( - cert, ext) - - def verify_chain(self, cert, ca_dict, cur_pathlen, use_crls, - required_names=None, usages=None): - """Validates the certificate against the given trust anchors. - - The 'cert' parameter is the certificate to validate. - - The 'ca_dict' parameter is a dictionary which maps subject - hashes to certs treated as trust anchors. - - The 'cur_pathlen' parameter is an integer indicating how many - certificates have been found between cert and the leaf cert. - - The 'use_crls' parameter is a boolean indicating whether - certificates should be checked to see if they've been revoked. 
- - The 'required_names' parameter is a set of strings that must - be seen as a CN in the chain of trust for the certificate.""" - - if required_names is None: - required_names = set() - verified = False - continue_loop = True - certs_with_problems = [] + crl = None + if self.transport: + crl = self.transport.get_crl( + crl_uri, self.__crl_root, more_uris=more_uris + ) - ca_dict = copy.copy(ca_dict) - for k, v in six.iteritems(self.get_ca_certs()): - if k in ca_dict: - ca_dict[k].extend(v) - else: - ca_dict[k] = v - - def merge_dicts(d1, d2): - """Function for merging usage dictionaries.""" - res = copy.deepcopy(d1) - for k in d2: - if k in res: - res[k].extend(d2[k]) - else: - res[k] = d2[k] - return res - - def discard_names(cert, required_names): - for cert_cn in [ - str(c.value) - for c - in cert.subject.get_attributes_for_oid( - x509.oid.NameOID.COMMON_NAME) - ]: - required_names.discard(cert_cn) - - if not usages: - usages = {} - for u in POSSIBLE_USES: - usages = merge_dicts(usages, u) - - # Check whether we can validate this certificate. - self.__check_extensions(cert, usages, cur_pathlen) - - # Check whether this certificate has been revoked. - self.__check_revocation(cert, ca_dict, use_crls) - - while continue_loop: - # If this certificate's CN is in the set of required - # names, remove it. - discard_names(cert, required_names) - - # Find the certificate that issued this certificate. - issuer = cert.issuer - issuer_hash = hashlib.sha1(misc.force_bytes( - issuer)).hexdigest() - - # See whether this certificate was issued by any of the - # given trust anchors. - for c in ca_dict.get(issuer_hash, []): - if self.__verify_x509_signature(cert, - c.public_key()): - verified = True - # Remove any required names found in the - # trust anchor. - discard_names(c, required_names) - # If there are more names to check for - # continue up the chain of trust to look - # for them. - if not required_names: - continue_loop = False - break - - # If the subject and issuer for this certificate are - # identical and the certificate hasn't been verified - # then this is an untrusted self-signed cert and should - # be rejected. - if hashlib.sha1(misc.force_bytes( - cert.subject)).hexdigest() == issuer_hash: - if not verified: - raise \ - api_errors.UntrustedSelfSignedCert( - cert) - # This break should break the - # while continue_loop loop. - break - - # If the certificate hasn't been issued by a trust - # anchor or more names need to be found, continue - # looking up the chain of trust. - if continue_loop: - up_chain = False - # Keep track of certs that would have verified - # this certificate but had critical extensions - # we can't handle yet for error reporting. - certs_with_problems = [] - for c in self.__get_certs_by_name(issuer): - # If the certificate is approved to - # sign another certificate, verifies - # the current certificate, and hasn't - # been revoked, consider it as the - # next link in the chain. check_ca - # checks both the basicConstraints - # extension and the keyUsage extension. - if misc.check_ca(c) and \ - self.__verify_x509_signature(cert, - c.public_key()): - problem = False - # Check whether this certificate - # has a critical extension we - # don't understand. 
- try: - self.__check_extensions( - c, CERT_SIGNING_USE, - cur_pathlen) - self.__check_revocation(c, - ca_dict, use_crls) - except (api_errors.UnsupportedCriticalExtension, api_errors.RevokedCertificate) as e: - certs_with_problems.append(e) - problem = True - # If this certificate has no - # problems with it, it's the - # next link in the chain so make - # it the current certificate and - # add one to cur_pathlen since - # there's one more chain cert - # between the code signing cert - # and the root of the chain. - if not problem: - up_chain = True - cert = c - cur_pathlen += 1 - break - # If there's not another link in the chain to be - # found, stop the iteration. - if not up_chain: - continue_loop = False - # If the certificate wasn't verified against a trust anchor, - # raise an exception. + # If we couldn't retrieve a CRL from the distribution point + # and no CRL is cached on disk, assume the cert has not been + # revoked. It's possible that this should be an image or + # publisher setting in the future. + if not crl: + return True + + # A CRL has been found, now it needs to be validated like + # a certificate is. + verified_crl = False + crl_issuer = crl.issuer + tas = ca_dict.get( + hashlib.sha1(misc.force_bytes(crl_issuer)).hexdigest(), [] + ) + for t in tas: + try: + if self.__verify_x509_signature(crl, t.public_key()): + # If t isn't approved for signing crls, + # the exception __check_extensions + # raises will take the code to the + # except below. + self.__check_extensions(t, CRL_SIGNING_USE, 0) + verified_crl = True + except api_errors.SigningException: + pass + if not verified_crl: + crl_cas = self.__get_certs_by_name(crl_issuer) + for c in crl_cas: + if self.__verify_x509_signature(crl, c.public_key()): + try: + self.verify_chain( + c, ca_dict, 0, True, usages=CRL_SIGNING_USE + ) + except api_errors.SigningException: + pass + else: + verified_crl = True + break + if not verified_crl: + return True + + # For a certificate to be revoked, its CRL must be validated + # and revoked the certificate. + + assert crl.issuer == cert.issuer + for rev in crl: + if rev.serial_number != cert.serial_number: + continue + try: + reason = rev.extensions.get_extension_for_oid( + x509.OID_CRL_REASON + ).value + except x509.ExtensionNotFound: + reason = None + raise api_errors.RevokedCertificate(cert, reason) + + def __check_crls(self, cert, ca_dict): + """Determines whether the certificate has been revoked by one of + its CRLs. + + The 'cert' parameter is the certificate to check for revocation. + + The 'ca_dict' is a dictionary which maps subject hashes to + certs treated as trust anchors.""" + + # If the certificate doesn't have a CRL location listed, treat + # it as valid. + + # The CRLs to be retrieved are stored in the + # CRLDistributionPoints extensions which is structured like + # this: + # + # CRLDitsributionPoints = [ + # CRLDistributionPoint = [ + # union { + # full_name = [ GeneralName, ... ] + # relative_name = [ GeneralName, ... ] + # }, ... ] + # , ... ] + # + # Relative names are a feature in X509 certs which allow to + # specify a location relative to another certificate. We are not + # supporting this and I'm not sure anybody is using this for + # CRLs. + # Full names are absolute locations but can be in different + # formats (refer to RFC5280) but in general only the URI type is + # used for CRLs. So this is the only thing we support here. 
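# --- Editor's note: standalone illustration, not part of this patch. ---
# The nested CRLDistributionPoints structure sketched in the comment above
# maps onto the cryptography API as follows; given any PEM certificate on
# disk (the path and helper name are hypothetical), its CRL URIs can be
# listed with:
from cryptography import x509
from cryptography.hazmat.backends import default_backend

def crl_uris_of(pem_path):
    with open(pem_path, "rb") as f:
        cert = x509.load_pem_x509_certificate(f.read(), default_backend())
    try:
        dps = cert.extensions.get_extension_for_oid(
            x509.oid.ExtensionOID.CRL_DISTRIBUTION_POINTS
        ).value
    except x509.ExtensionNotFound:
        return []
    return [
        str(gn.value)
        for dp in dps
        if dp.full_name                    # relative names are skipped
        for gn in dp.full_name
        if isinstance(gn, x509.UniformResourceIdentifier)
    ]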
+ + try: + dps = cert.extensions.get_extension_for_oid( + x509.oid.ExtensionOID.CRL_DISTRIBUTION_POINTS + ).value + except x509.ExtensionNotFound: + return + + crl_uris = [] + for dp in dps: + if not dp.full_name: + # we don't support relative names + continue + for uri in dp.full_name: + if not isinstance(uri, x509.UniformResourceIdentifier): + # we only support URIs + continue + crl_uris.append(str(uri.value)) + + for i, uri in enumerate(crl_uris): + more_uris = i < len(crl_uris) - 1 + self.__check_crl(cert, ca_dict, uri, more_uris=more_uris) + + def __check_revocation(self, cert, ca_dict, use_crls): + hsh = self.__hash_cert(cert) + if hsh in self.revoked_ca_certs: + raise api_errors.RevokedCertificate( + cert, "User manually revoked certificate." + ) + if use_crls: + self.__check_crls(cert, ca_dict) + + def __check_extensions(self, cert, usages, cur_pathlen): + """Check whether the critical extensions in this certificate + are supported and allow the provided use(s).""" + + try: + exts = cert.extensions + except ValueError as e: + raise api_errors.InvalidCertificateExtensions(cert, e) + + def check_values(vs): + for v in vs: + if v in supported_vs: + continue + # If there is only one extension value, it must + # be the problematic one. Otherwise, we also + # output the first unsupported value as the + # problematic value following extension value. + if len(vs) < 2: + raise api_errors.UnsupportedExtensionValue( + cert, ext, ", ".join(vs) + ) + raise api_errors.UnsupportedExtensionValue( + cert, ext, ", ".join(vs), v + ) + + for ext in exts: + etype = type(ext.value) + if etype in SUPPORTED_EXTENSION_VALUES: + supported_vs = SUPPORTED_EXTENSION_VALUES[etype] + keys = EXTENSIONS_VALUES[etype] + if etype == x509.BasicConstraints: + pathlen = ext.value.path_length + if pathlen is not None and cur_pathlen > pathlen: + raise api_errors.PathlenTooShort( + cert, cur_pathlen, pathlen + ) + elif etype == x509.KeyUsage: + keys = list(EXTENSIONS_VALUES[etype]) + if not getattr(ext.value, "key_agreement"): + # Cryptography error: + # encipher_only/decipher_only is + # undefined unless key_agreement + # is true + keys.remove("encipher_only") + keys.remove("decipher_only") + vs = [key for key in keys if getattr(ext.value, key)] + # Check whether the values for the extension are + # recognized. + check_values(vs) + # For each use, check to see whether it's + # permitted by the certificate's extension + # values. + if etype not in usages: + continue + for u in usages[etype]: + if u not in vs: + raise api_errors.InappropriateCertificateUse( + cert, ext, u, ", ".join(vs) + ) + # If the extension name is unrecognized and critical, + # then the chain cannot be verified. + elif ext.critical: + raise api_errors.UnsupportedCriticalExtension(cert, ext) + + def verify_chain( + self, + cert, + ca_dict, + cur_pathlen, + use_crls, + required_names=None, + usages=None, + ): + """Validates the certificate against the given trust anchors. + + The 'cert' parameter is the certificate to validate. + + The 'ca_dict' parameter is a dictionary which maps subject + hashes to certs treated as trust anchors. + + The 'cur_pathlen' parameter is an integer indicating how many + certificates have been found between cert and the leaf cert. + + The 'use_crls' parameter is a boolean indicating whether + certificates should be checked to see if they've been revoked. 
+ + The 'required_names' parameter is a set of strings that must + be seen as a CN in the chain of trust for the certificate.""" + + if required_names is None: + required_names = set() + verified = False + continue_loop = True + certs_with_problems = [] + + ca_dict = copy.copy(ca_dict) + for k, v in six.iteritems(self.get_ca_certs()): + if k in ca_dict: + ca_dict[k].extend(v) + else: + ca_dict[k] = v + + def merge_dicts(d1, d2): + """Function for merging usage dictionaries.""" + res = copy.deepcopy(d1) + for k in d2: + if k in res: + res[k].extend(d2[k]) + else: + res[k] = d2[k] + return res + + def discard_names(cert, required_names): + for cert_cn in [ + str(c.value) + for c in cert.subject.get_attributes_for_oid( + x509.oid.NameOID.COMMON_NAME + ) + ]: + required_names.discard(cert_cn) + + if not usages: + usages = {} + for u in POSSIBLE_USES: + usages = merge_dicts(usages, u) + + # Check whether we can validate this certificate. + self.__check_extensions(cert, usages, cur_pathlen) + + # Check whether this certificate has been revoked. + self.__check_revocation(cert, ca_dict, use_crls) + + while continue_loop: + # If this certificate's CN is in the set of required + # names, remove it. + discard_names(cert, required_names) + + # Find the certificate that issued this certificate. + issuer = cert.issuer + issuer_hash = hashlib.sha1(misc.force_bytes(issuer)).hexdigest() + + # See whether this certificate was issued by any of the + # given trust anchors. + for c in ca_dict.get(issuer_hash, []): + if self.__verify_x509_signature(cert, c.public_key()): + verified = True + # Remove any required names found in the + # trust anchor. + discard_names(c, required_names) + # If there are more names to check for + # continue up the chain of trust to look + # for them. + if not required_names: + continue_loop = False + break + + # If the subject and issuer for this certificate are + # identical and the certificate hasn't been verified + # then this is an untrusted self-signed cert and should + # be rejected. + if ( + hashlib.sha1(misc.force_bytes(cert.subject)).hexdigest() + == issuer_hash + ): if not verified: - raise api_errors.BrokenChain(cert, - certs_with_problems) - - alias = property(lambda self: self.__alias, __set_alias, - doc="An alternative name for a publisher.") - - client_uuid = property(lambda self: self.__client_uuid, - __set_client_uuid, - doc="A Universally Unique Identifier (UUID) used to identify a " - "client image to a publisher.") - - client_uuid_time = property(lambda self: self.__client_uuid_time, - __set_client_uuid_time, - doc="The last time that the UUID was generated") - - disabled = property(lambda self: self.__disabled, __set_disabled, - doc="A boolean value indicating whether the publisher should be " - "used for packaging operations.") - - nochild = property(__get_nochild, None, - doc="A boolean value indicating whether the publisher should be " - "present in children.") - - last_refreshed = property(__get_last_refreshed, __set_last_refreshed, - doc="A datetime object representing the time (in UTC) the " - "publisher's selected repository was last refreshed for new " - "metadata (such as catalog updates). 
'None' if the publisher " - "hasn't been refreshed yet or the time is not available.") - - meta_root = property(lambda self: self.__meta_root, __set_meta_root, - doc="The absolute pathname of the directory where the publisher's " - "metadata should be written to and read from.") - - prefix = property(lambda self: self.__prefix, __set_prefix, - doc="The name of the publisher.") - - repository = property(lambda self: self.__repository, - __set_repository, - doc="A reference to the selected repository object.") - - sticky = property(lambda self: self.__sticky, __set_stickiness, - doc="Whether or not installed packages from this publisher are" - " always preferred to other publishers.") - - def __get_prop(self, name): - """Accessor method for properties dictionary""" - return self.__properties[name] - - @staticmethod - def __read_list(list_str): - """Take a list in string representation and convert it back - to a Python list.""" - - list_str = misc.force_str(list_str) - # Strip brackets and any whitespace - list_str = list_str.strip("][ ") - # Strip comma and any whitespeace - lst = list_str.split(", ") - # Strip empty whitespace, single, and double quotation marks - lst = [ s.strip("' \"") for s in lst ] - # Eliminate any empty strings - lst = [ s for s in lst if s != '' ] - - return lst - - def __set_prop(self, name, values): - """Accessor method to add a property""" - if self.sys_pub: - raise api_errors.ModifyingSyspubException(_("Cannot " - "set a property for a system publisher. The " - "property was:{0}").format(name)) - - if name == SIGNATURE_POLICY: - self.__sig_policy = None - if isinstance(values, six.string_types): - values = [values] - policy_name = values[0] - if policy_name not in sigpolicy.Policy.policies(): - raise api_errors.InvalidPropertyValue(_( - "{val} is not a valid value for this " - "property:{prop}").format(val=policy_name, - prop=SIGNATURE_POLICY)) - if policy_name == "require-names": - if self.__delay_validation: - # If __delay_validation is set, then - # it's possible that - # signature-required-names was - # set by a previous call to set_prop - # file. If so, don't overwrite the - # values that have already been read. - self.__properties.setdefault( - "signature-required-names", []) - self.__properties[ - "signature-required-names"].extend( - values[1:]) - else: - self.__properties[ - "signature-required-names"] = \ - values[1:] - self.__validate_properties() - else: - if len(values) > 1: - raise api_errors.InvalidPropertyValue(_( - "The {0} signature-policy takes no " - "argument.").format(policy_name)) - self.__properties[SIGNATURE_POLICY] = policy_name - return - if name == "signature-required-names": - if isinstance(values, six.string_types): - values = self.__read_list(values) - self.__properties[name] = values - - def __del_prop(self, name): - """Accessor method for properties""" - if self.sys_pub: - raise api_errors.ModifyingSyspubException(_("Cannot " - "unset a property for a system publisher. 
The " - "property was:{0}").format(name)) - del self.__properties[name] - - def __prop_iter(self): - return self.__properties.__iter__() - - def __prop_iteritems(self): - """Support iteritems on properties""" - return six.iteritems(self.__properties) - - def __prop_keys(self): - """Support keys() on properties""" - return list(self.__properties.keys()) - - def __prop_values(self): - """Support values() on properties""" - return list(self.__properties.values()) - - def __prop_getdefault(self, name, value): - """Support getdefault() on properties""" - return self.__properties.get(name, value) - - def __prop_setdefault(self, name, value): - """Support setdefault() on properties""" - # Must set it this way so that the logic in __set_prop is used. - try: - return self.__properties[name] - except KeyError: - self.properties[name] = value - return value - - def __prop_update(self, d): - """Support update() on properties""" - - # The logic in __set_prop requires that the item with key - # 'SIGNATURE_POLICY' comes before the item with key - # 'signature-required-names'. - od = collections.OrderedDict(sorted(six.iteritems(d))) - for k, v in six.iteritems(od): - # Must iterate through each value and - # set it this way so that the logic - # in __set_prop is used. - self.properties[k] = v - - def __prop_pop(self, d, default): - """Support pop() on properties""" - if self.sys_pub: - raise api_errors.ModifyingSyspubException(_("Cannot " - "unset a property for a system publisher.")) - return self.__properties.pop(d, default) - - properties = DictProperty(__get_prop, __set_prop, __del_prop, - __prop_iteritems, __prop_keys, __prop_values, __prop_iter, - doc="A dict holding the properties for an image.", - fgetdefault=__prop_getdefault, fsetdefault=__prop_setdefault, - update=__prop_update, pop=__prop_pop) - - @property - def signature_policy(self): - """Return the signature policy for the publisher.""" - - if self.__sig_policy is not None: - return self.__sig_policy - txt = self.properties.get(SIGNATURE_POLICY, - sigpolicy.DEFAULT_POLICY) - names = self.properties.get("signature-required-names", []) - self.__sig_policy = sigpolicy.Policy.policy_factory(txt, names) - return self.__sig_policy + raise api_errors.UntrustedSelfSignedCert(cert) + # This break should break the + # while continue_loop loop. + break + + # If the certificate hasn't been issued by a trust + # anchor or more names need to be found, continue + # looking up the chain of trust. + if continue_loop: + up_chain = False + # Keep track of certs that would have verified + # this certificate but had critical extensions + # we can't handle yet for error reporting. + certs_with_problems = [] + for c in self.__get_certs_by_name(issuer): + # If the certificate is approved to + # sign another certificate, verifies + # the current certificate, and hasn't + # been revoked, consider it as the + # next link in the chain. check_ca + # checks both the basicConstraints + # extension and the keyUsage extension. + if misc.check_ca(c) and self.__verify_x509_signature( + cert, c.public_key() + ): + problem = False + # Check whether this certificate + # has a critical extension we + # don't understand. 
+ try: + self.__check_extensions( + c, CERT_SIGNING_USE, cur_pathlen + ) + self.__check_revocation(c, ca_dict, use_crls) + except ( + api_errors.UnsupportedCriticalExtension, + api_errors.RevokedCertificate, + ) as e: + certs_with_problems.append(e) + problem = True + # If this certificate has no + # problems with it, it's the + # next link in the chain so make + # it the current certificate and + # add one to cur_pathlen since + # there's one more chain cert + # between the code signing cert + # and the root of the chain. + if not problem: + up_chain = True + cert = c + cur_pathlen += 1 + break + # If there's not another link in the chain to be + # found, stop the iteration. + if not up_chain: + continue_loop = False + # If the certificate wasn't verified against a trust anchor, + # raise an exception. + if not verified: + raise api_errors.BrokenChain(cert, certs_with_problems) + + alias = property( + lambda self: self.__alias, + __set_alias, + doc="An alternative name for a publisher.", + ) + + client_uuid = property( + lambda self: self.__client_uuid, + __set_client_uuid, + doc="A Universally Unique Identifier (UUID) used to identify a " + "client image to a publisher.", + ) + + client_uuid_time = property( + lambda self: self.__client_uuid_time, + __set_client_uuid_time, + doc="The last time that the UUID was generated", + ) + + disabled = property( + lambda self: self.__disabled, + __set_disabled, + doc="A boolean value indicating whether the publisher should be " + "used for packaging operations.", + ) + + nochild = property( + __get_nochild, + None, + doc="A boolean value indicating whether the publisher should be " + "present in children.", + ) + + last_refreshed = property( + __get_last_refreshed, + __set_last_refreshed, + doc="A datetime object representing the time (in UTC) the " + "publisher's selected repository was last refreshed for new " + "metadata (such as catalog updates). 'None' if the publisher " + "hasn't been refreshed yet or the time is not available.", + ) + + meta_root = property( + lambda self: self.__meta_root, + __set_meta_root, + doc="The absolute pathname of the directory where the publisher's " + "metadata should be written to and read from.", + ) + + prefix = property( + lambda self: self.__prefix, + __set_prefix, + doc="The name of the publisher.", + ) + + repository = property( + lambda self: self.__repository, + __set_repository, + doc="A reference to the selected repository object.", + ) + + sticky = property( + lambda self: self.__sticky, + __set_stickiness, + doc="Whether or not installed packages from this publisher are" + " always preferred to other publishers.", + ) + + def __get_prop(self, name): + """Accessor method for properties dictionary""" + return self.__properties[name] + + @staticmethod + def __read_list(list_str): + """Take a list in string representation and convert it back + to a Python list.""" + + list_str = misc.force_str(list_str) + # Strip brackets and any whitespace + list_str = list_str.strip("][ ") + # Strip comma and any whitespeace + lst = list_str.split(", ") + # Strip empty whitespace, single, and double quotation marks + lst = [s.strip("' \"") for s in lst] + # Eliminate any empty strings + lst = [s for s in lst if s != ""] + + return lst + + def __set_prop(self, name, values): + """Accessor method to add a property""" + if self.sys_pub: + raise api_errors.ModifyingSyspubException( + _( + "Cannot " + "set a property for a system publisher. 
The " + "property was:{0}" + ).format(name) + ) + + if name == SIGNATURE_POLICY: + self.__sig_policy = None + if isinstance(values, six.string_types): + values = [values] + policy_name = values[0] + if policy_name not in sigpolicy.Policy.policies(): + raise api_errors.InvalidPropertyValue( + _( + "{val} is not a valid value for this " "property:{prop}" + ).format(val=policy_name, prop=SIGNATURE_POLICY) + ) + if policy_name == "require-names": + if self.__delay_validation: + # If __delay_validation is set, then + # it's possible that + # signature-required-names was + # set by a previous call to set_prop + # file. If so, don't overwrite the + # values that have already been read. + self.__properties.setdefault("signature-required-names", []) + self.__properties["signature-required-names"].extend( + values[1:] + ) + else: + self.__properties["signature-required-names"] = values[1:] + self.__validate_properties() + else: + if len(values) > 1: + raise api_errors.InvalidPropertyValue( + _( + "The {0} signature-policy takes no " "argument." + ).format(policy_name) + ) + self.__properties[SIGNATURE_POLICY] = policy_name + return + if name == "signature-required-names": + if isinstance(values, six.string_types): + values = self.__read_list(values) + self.__properties[name] = values + + def __del_prop(self, name): + """Accessor method for properties""" + if self.sys_pub: + raise api_errors.ModifyingSyspubException( + _( + "Cannot " + "unset a property for a system publisher. The " + "property was:{0}" + ).format(name) + ) + del self.__properties[name] + + def __prop_iter(self): + return self.__properties.__iter__() + + def __prop_iteritems(self): + """Support iteritems on properties""" + return six.iteritems(self.__properties) + + def __prop_keys(self): + """Support keys() on properties""" + return list(self.__properties.keys()) + + def __prop_values(self): + """Support values() on properties""" + return list(self.__properties.values()) + + def __prop_getdefault(self, name, value): + """Support getdefault() on properties""" + return self.__properties.get(name, value) + + def __prop_setdefault(self, name, value): + """Support setdefault() on properties""" + # Must set it this way so that the logic in __set_prop is used. + try: + return self.__properties[name] + except KeyError: + self.properties[name] = value + return value + + def __prop_update(self, d): + """Support update() on properties""" + + # The logic in __set_prop requires that the item with key + # 'SIGNATURE_POLICY' comes before the item with key + # 'signature-required-names'. + od = collections.OrderedDict(sorted(six.iteritems(d))) + for k, v in six.iteritems(od): + # Must iterate through each value and + # set it this way so that the logic + # in __set_prop is used. 
+ self.properties[k] = v + + def __prop_pop(self, d, default): + """Support pop() on properties""" + if self.sys_pub: + raise api_errors.ModifyingSyspubException( + _("Cannot " "unset a property for a system publisher.") + ) + return self.__properties.pop(d, default) + + properties = DictProperty( + __get_prop, + __set_prop, + __del_prop, + __prop_iteritems, + __prop_keys, + __prop_values, + __prop_iter, + doc="A dict holding the properties for an image.", + fgetdefault=__prop_getdefault, + fsetdefault=__prop_setdefault, + update=__prop_update, + pop=__prop_pop, + ) + + @property + def signature_policy(self): + """Return the signature policy for the publisher.""" + + if self.__sig_policy is not None: + return self.__sig_policy + txt = self.properties.get(SIGNATURE_POLICY, sigpolicy.DEFAULT_POLICY) + names = self.properties.get("signature-required-names", []) + self.__sig_policy = sigpolicy.Policy.policy_factory(txt, names) + return self.__sig_policy + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/query_parser.py b/src/modules/client/query_parser.py index 46246ddaf..0e2e67072 100644 --- a/src/modules/client/query_parser.py +++ b/src/modules/client/query_parser.py @@ -32,422 +32,433 @@ from pkg.choose import choose import pkg.query_parser as qp -from pkg.query_parser import BooleanQueryException, ParseError, QueryLengthExceeded +from pkg.query_parser import ( + BooleanQueryException, + ParseError, + QueryLengthExceeded, +) import itertools + class QueryLexer(qp.QueryLexer): - pass + pass + class QueryParser(qp.QueryParser): - """This class exists so that the classes the parent class query parser - uses to build the AST are the ones defined in this module and not the - parent class's module. This is done so that a single query parser can - be shared between the client and server modules but will construct an - AST using the appropriate classes.""" - - def __init__(self, lexer): - qp.QueryParser.__init__(self, lexer) - mod = sys.modules[QueryParser.__module__] - tmp = {} - for class_name in self.query_objs.keys(): - assert hasattr(mod, class_name) - tmp[class_name] = getattr(mod, class_name) - self.query_objs = tmp + """This class exists so that the classes the parent class query parser + uses to build the AST are the ones defined in this module and not the + parent class's module. This is done so that a single query parser can + be shared between the client and server modules but will construct an + AST using the appropriate classes.""" + + def __init__(self, lexer): + qp.QueryParser.__init__(self, lexer) + mod = sys.modules[QueryParser.__module__] + tmp = {} + for class_name in self.query_objs.keys(): + assert hasattr(mod, class_name) + tmp[class_name] = getattr(mod, class_name) + self.query_objs = tmp + # Because many classes do not have client specific modifications, they # simply subclass the parent module's classes. 
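# --- Editor's note: illustrative sketch only, not part of this patch. ---
# The substitution performed in QueryParser.__init__ above relies on every
# client-side AST class sharing its name with the parent module's class, so
# the shared grammar can be re-pointed at this module's definitions by name.
# A minimal, runnable model of that pattern (all names here are hypothetical):
import sys

class BaseNode:
    kind = "server"

class BaseParser:
    # The parent parser builds its AST from the classes named here.
    query_objs = {"BaseNode": BaseNode}

# A client module would define its own same-named subclasses; redefining the
# name in this single-file demo stands in for that second module.
class BaseNode(BaseNode):  # noqa: F811  (deliberate shadowing for the demo)
    kind = "client"

class ClientParser(BaseParser):
    def __init__(self):
        mod = sys.modules[ClientParser.__module__]
        # The same lookup QueryParser.__init__ performs: each AST class name
        # is re-pointed at the definition found in this module.
        self.query_objs = {n: getattr(mod, n) for n in BaseParser.query_objs}

# ClientParser().query_objs["BaseNode"].kind  ->  "client"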
class Query(qp.Query): - pass + pass class AndQuery(qp.AndQuery): - def remove_root(self, img_dir): - lcv = self.lc.remove_root(img_dir) - rcv = self.rc.remove_root(img_dir) - return lcv or rcv + def remove_root(self, img_dir): + lcv = self.lc.remove_root(img_dir) + rcv = self.rc.remove_root(img_dir) + return lcv or rcv class EmptyQuery(object): - def __init__(self, return_type): - self.return_type = return_type - - def search(self, *args): - return [] + def __init__(self, return_type): + self.return_type = return_type + + def search(self, *args): + return [] - def set_info(self, **kwargs): - return + def set_info(self, **kwargs): + return - def __str__(self): - if self.return_type == qp.Query.RETURN_ACTIONS: - return "(a AND b)" - else: - return "<(a AND b)>" + def __str__(self): + if self.return_type == qp.Query.RETURN_ACTIONS: + return "(a AND b)" + else: + return "<(a AND b)>" - def propagate_pkg_return(self): - """Makes this node return packages instead of actions. - Returns None because no changes need to be made to the tree.""" - self.return_type = qp.Query.RETURN_PACKAGES - return None + def propagate_pkg_return(self): + """Makes this node return packages instead of actions. + Returns None because no changes need to be made to the tree.""" + self.return_type = qp.Query.RETURN_PACKAGES + return None class OrQuery(qp.OrQuery): - def remove_root(self, img_dir): - lcv = self.lc.remove_root(img_dir) - if not lcv: - self.lc = EmptyQuery(self.lc.return_type) - rcv = self.rc.remove_root(img_dir) - if not rcv: - self.rc = EmptyQuery(self.rc.return_type) - return lcv or rcv + def remove_root(self, img_dir): + lcv = self.lc.remove_root(img_dir) + if not lcv: + self.lc = EmptyQuery(self.lc.return_type) + rcv = self.rc.remove_root(img_dir) + if not rcv: + self.rc = EmptyQuery(self.rc.return_type) + return lcv or rcv class PkgConversion(qp.PkgConversion): - def remove_root(self, img_dir): - return self.query.remove_root(img_dir) + def remove_root(self, img_dir): + return self.query.remove_root(img_dir) class PhraseQuery(qp.PhraseQuery): - def remove_root(self, img_dir): - return self.query.remove_root(img_dir) + def remove_root(self, img_dir): + return self.query.remove_root(img_dir) class FieldQuery(qp.FieldQuery): - def remove_root(self, img_dir): - return self.query.remove_root(img_dir) + def remove_root(self, img_dir): + return self.query.remove_root(img_dir) class TopQuery(qp.TopQuery): - """This class handles raising the exception if the search was conducted - without using indexes. It yields all results, then raises the - exception.""" + """This class handles raising the exception if the search was conducted + without using indexes. 
It yields all results, then raises the + exception.""" - def __init__(self, *args, **kwargs): - qp.TopQuery.__init__(self, *args, **kwargs) - self.__use_slow_search = False + def __init__(self, *args, **kwargs): + qp.TopQuery.__init__(self, *args, **kwargs) + self.__use_slow_search = False - def get_use_slow_search(self): - """Return whether slow search has been used.""" + def get_use_slow_search(self): + """Return whether slow search has been used.""" - return self.__use_slow_search + return self.__use_slow_search - def set_use_slow_search(self, val): - """Set whether slow search has been used.""" + def set_use_slow_search(self, val): + """Set whether slow search has been used.""" - self.__use_slow_search = val - - def set_info(self, **kwargs): - """This function provides the necessary information to the AST - so that a search can be performed.""" + self.__use_slow_search = val - qp.TopQuery.set_info(self, - get_use_slow_search=self.get_use_slow_search, - set_use_slow_search=self.set_use_slow_search, - **kwargs) + def set_info(self, **kwargs): + """This function provides the necessary information to the AST + so that a search can be performed.""" - def search(self, *args): - """This function performs performs local client side search. + qp.TopQuery.set_info( + self, + get_use_slow_search=self.get_use_slow_search, + set_use_slow_search=self.set_use_slow_search, + **kwargs, + ) - If slow search was used, then after all results have been - returned, it raises SlowSearchUsed.""" + def search(self, *args): + """This function performs performs local client side search. - for i in qp.TopQuery.search(self, *args): - yield i - if self.__use_slow_search: - raise api_errors.SlowSearchUsed() + If slow search was used, then after all results have been + returned, it raises SlowSearchUsed.""" - def remove_root(self, img_dir): - return self.query.remove_root(img_dir) + for i in qp.TopQuery.search(self, *args): + yield i + if self.__use_slow_search: + raise api_errors.SlowSearchUsed() - def add_or(self, rc): - lc = self.query - if isinstance(rc, TopQuery): - rc = rc.query - self.query = OrQuery(lc, rc) + def remove_root(self, img_dir): + return self.query.remove_root(img_dir) + + def add_or(self, rc): + lc = self.query + if isinstance(rc, TopQuery): + rc = rc.query + self.query = OrQuery(lc, rc) class TermQuery(qp.TermQuery): - """This class handles the client specific search logic for searching - for a base query term.""" - - __client_dict_locks = {} - _global_data_dict = {} - - def __init__(self, term): - qp.TermQuery.__init__(self, term) - self._impl_fmri_to_path = None - self._efn = None - self._data_fast_remove = None - self.full_fmri_hash = None - self._data_fast_add = None - - def __init_gdd(self, path): - gdd = self._global_data_dict - if path in gdd: - return - - # Setup default global dictionary for this index path. - qp.TermQuery.__init_gdd(self, path) - - # Client search needs to account for the packages which have - # been installed or removed since the last time the indexes - # were rebuilt. Add client-specific global data dictionaries - # for this index path. - tq_gdd = gdd[path] - tq_gdd["fast_add"] = ss.IndexStoreSet(ss.FAST_ADD) - tq_gdd["fast_remove"] = ss.IndexStoreSet(ss.FAST_REMOVE) - tq_gdd["fmri_hash"] = ss.IndexStoreSetHash( - ss.FULL_FMRI_HASH_FILE) - - def _lock_client_gdd(self, index_dir): - # This lock is used so that only one instance of a term query - # object is ever modifying the class wide variable for this - # index. 
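# --- Editor's note: illustrative sketch only, not part of this patch. ---
# The locking pattern used by _lock_client_gdd/_unlock_client_gdd: one
# lazily created lock per index directory, kept in a class-wide dict, so
# concurrent TermQuery instances serialise updates to the shared per-index
# dictionaries.  Class and path names below are hypothetical.
import threading

class PerKeyLocks:
    _locks = {}                      # shared across all instances

    def acquire(self, key):
        # setdefault() on a plain dict is effectively atomic in CPython,
        # so at most one Lock ends up installed for a given key.
        self._locks.setdefault(key, threading.Lock()).acquire()

    def release(self, key):
        self._locks[key].release()

# locks = PerKeyLocks()
# locks.acquire("/var/pkg/cache/index")
# try:
#     ...  # update the shared per-index state
# finally:
#     locks.release("/var/pkg/cache/index")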
- self.__client_dict_locks.setdefault(index_dir, - threading.Lock()).acquire() - - def _unlock_client_gdd(self, index_dir): - self.__client_dict_locks[index_dir].release() - - def set_info(self, gen_installed_pkg_names, get_use_slow_search, - set_use_slow_search, **kwargs): - """This function provides the necessary information to the AST - so that a search can be performed. - - The "gen_installed_pkg_names" parameter is a function which - returns a generator function which iterates over the names of - the installed packages in the image. - - The "get_use_slow_search" parameter is a function that returns - whether slow search has been used. - - The "set_use_slow_search" parameter is a function that sets - whether slow search was used.""" - - self.get_use_slow_search = get_use_slow_search - self._efn = gen_installed_pkg_names() - index_dir = kwargs["index_dir"] - self._lock_client_gdd(index_dir) - try: - try: - qp.TermQuery.set_info(self, - gen_installed_pkg_names=\ - gen_installed_pkg_names, - get_use_slow_search=get_use_slow_search, - set_use_slow_search=set_use_slow_search, - **kwargs) - # Take local copies of the client-only - # dictionaries so that if another thread - # changes the shared data structure, this - # instance's objects won't be affected. - tq_gdd = self._get_gdd(index_dir) - self._data_fast_add = tq_gdd["fast_add"] - self._data_fast_remove = tq_gdd["fast_remove"] - self.full_fmri_hash = tq_gdd["fmri_hash"] - set_use_slow_search(False) - except se.NoIndexException: - # If no index was found, the slower version of - # search will be used. - set_use_slow_search(True) - finally: - self._unlock_client_gdd(index_dir) - - def search(self, restriction, fmris, manifest_func, excludes): - """This function performs performs local client side search. - - The "restriction" parameter is a generator over the results that - another branch of the AST has already found. If it exists, - those results are treated as the domain for search. If it does - not exist, search uses the set of actions from installed - packages as the domain. - - The "fmris" parameter is a function which produces an object - which iterates over the names of installed fmris. - - The "manifest_func" parameter is a function which takes a fmri - and returns a path to the manifest for that fmri. 
- - The "excludes" parameter is a list of the variants defined for - this image.""" - - if restriction: - return self._restricted_search_internal(restriction) - elif not self.get_use_slow_search(): - try: - self.full_fmri_hash.check_against_file( - self._efn) - except se.IncorrectIndexFileHash: - raise \ - api_errors.IncorrectIndexFileHash() - base_res = \ - self._search_internal(fmris) - client_res = \ - self._search_fast_update(manifest_func, - excludes) - base_res = self._check_fast_remove(base_res) - it = itertools.chain(self._get_results(base_res), - self._get_fast_results(client_res)) - return it - else: - return self.slow_search(fmris, manifest_func, excludes) - - def _check_fast_remove(self, res): - """This function removes any results from the generator "res" - (the search results) that are actions from packages known to - have been removed from the image since the last time the index - was built.""" - - return ( - (p_str, o, a, s, f) - for p_str, o, a, s, f - in res - if not self._data_fast_remove.has_entity(p_str) + """This class handles the client specific search logic for searching + for a base query term.""" + + __client_dict_locks = {} + _global_data_dict = {} + + def __init__(self, term): + qp.TermQuery.__init__(self, term) + self._impl_fmri_to_path = None + self._efn = None + self._data_fast_remove = None + self.full_fmri_hash = None + self._data_fast_add = None + + def __init_gdd(self, path): + gdd = self._global_data_dict + if path in gdd: + return + + # Setup default global dictionary for this index path. + qp.TermQuery.__init_gdd(self, path) + + # Client search needs to account for the packages which have + # been installed or removed since the last time the indexes + # were rebuilt. Add client-specific global data dictionaries + # for this index path. + tq_gdd = gdd[path] + tq_gdd["fast_add"] = ss.IndexStoreSet(ss.FAST_ADD) + tq_gdd["fast_remove"] = ss.IndexStoreSet(ss.FAST_REMOVE) + tq_gdd["fmri_hash"] = ss.IndexStoreSetHash(ss.FULL_FMRI_HASH_FILE) + + def _lock_client_gdd(self, index_dir): + # This lock is used so that only one instance of a term query + # object is ever modifying the class wide variable for this + # index. + self.__client_dict_locks.setdefault( + index_dir, threading.Lock() + ).acquire() + + def _unlock_client_gdd(self, index_dir): + self.__client_dict_locks[index_dir].release() + + def set_info( + self, + gen_installed_pkg_names, + get_use_slow_search, + set_use_slow_search, + **kwargs, + ): + """This function provides the necessary information to the AST + so that a search can be performed. + + The "gen_installed_pkg_names" parameter is a function which + returns a generator function which iterates over the names of + the installed packages in the image. + + The "get_use_slow_search" parameter is a function that returns + whether slow search has been used. + + The "set_use_slow_search" parameter is a function that sets + whether slow search was used.""" + + self.get_use_slow_search = get_use_slow_search + self._efn = gen_installed_pkg_names() + index_dir = kwargs["index_dir"] + self._lock_client_gdd(index_dir) + try: + try: + qp.TermQuery.set_info( + self, + gen_installed_pkg_names=gen_installed_pkg_names, + get_use_slow_search=get_use_slow_search, + set_use_slow_search=set_use_slow_search, + **kwargs, + ) + # Take local copies of the client-only + # dictionaries so that if another thread + # changes the shared data structure, this + # instance's objects won't be affected. 
+ tq_gdd = self._get_gdd(index_dir) + self._data_fast_add = tq_gdd["fast_add"] + self._data_fast_remove = tq_gdd["fast_remove"] + self.full_fmri_hash = tq_gdd["fmri_hash"] + set_use_slow_search(False) + except se.NoIndexException: + # If no index was found, the slower version of + # search will be used. + set_use_slow_search(True) + finally: + self._unlock_client_gdd(index_dir) + + def search(self, restriction, fmris, manifest_func, excludes): + """This function performs performs local client side search. + + The "restriction" parameter is a generator over the results that + another branch of the AST has already found. If it exists, + those results are treated as the domain for search. If it does + not exist, search uses the set of actions from installed + packages as the domain. + + The "fmris" parameter is a function which produces an object + which iterates over the names of installed fmris. + + The "manifest_func" parameter is a function which takes a fmri + and returns a path to the manifest for that fmri. + + The "excludes" parameter is a list of the variants defined for + this image.""" + + if restriction: + return self._restricted_search_internal(restriction) + elif not self.get_use_slow_search(): + try: + self.full_fmri_hash.check_against_file(self._efn) + except se.IncorrectIndexFileHash: + raise api_errors.IncorrectIndexFileHash() + base_res = self._search_internal(fmris) + client_res = self._search_fast_update(manifest_func, excludes) + base_res = self._check_fast_remove(base_res) + it = itertools.chain( + self._get_results(base_res), self._get_fast_results(client_res) + ) + return it + else: + return self.slow_search(fmris, manifest_func, excludes) + + def _check_fast_remove(self, res): + """This function removes any results from the generator "res" + (the search results) that are actions from packages known to + have been removed from the image since the last time the index + was built.""" + + return ( + (p_str, o, a, s, f) + for p_str, o, a, s, f in res + if not self._data_fast_remove.has_entity(p_str) + ) + + def _search_fast_update(self, manifest_func, excludes): + """This function searches the packages which have been + installed since the last time the index was rebuilt. + + The "manifest_func" parameter is a function which maps fmris to + the path to their manifests. + + The "excludes" parameter is a list of variants defined in the + image.""" + + assert self._data_main_dict.get_file_handle() is not None + + glob = self._glob + term = self._term + case_sensitive = self._case_sensitive + + if not case_sensitive: + glob = True + + fast_update_dict = {} + + fast_update_res = [] + + # self._data_fast_add holds the names of the fmris added + # since the last time the index was rebuilt. 
+ for fmri_str in self._data_fast_add._set: + if not (self.pkg_name_wildcard or self.pkg_name_match(fmri_str)): + continue + f = fmri.PkgFmri(fmri_str) + path = manifest_func(f) + search_dict = manifest.Manifest.search_dict( + path, return_line=True, excludes=excludes + ) + for tmp in search_dict: + tok, at, st, fv = tmp + if not ( + self.action_type_wildcard or at == self.action_type + ) or not (self.key_wildcard or st == self.key): + continue + if tok not in fast_update_dict: + fast_update_dict[tok] = [] + fast_update_dict[tok].append( + (at, st, fv, fmri_str, search_dict[tmp]) + ) + if glob: + keys = fast_update_dict.keys() + matches = choose(keys, term, case_sensitive) + fast_update_res = [fast_update_dict[m] for m in matches] + + else: + if term in fast_update_dict: + fast_update_res.append(fast_update_dict[term]) + return fast_update_res + + def _get_fast_results(self, fast_update_res): + """This function transforms the output of _search_fast_update + to match that of _search_internal.""" + + for sub_list in fast_update_res: + for at, st, fv, fmri_str, line_list in sub_list: + for l in line_list: + yield at, st, fmri_str, fv, l + + def slow_search(self, fmris, manifest_func, excludes): + """This function performs search when no prebuilt index is + available. + + The "fmris" parameter is a generator function which iterates + over the packages to be searched. + + The "manifest_func" parameter is a function which maps fmris to + the path to their manifests. + + The "excludes" parameter is a list of variants defined in the + image.""" + + for pfmri in list(fmris()): + fmri_str = pfmri.get_fmri(anarchy=True, include_scheme=False) + if not (self.pkg_name_wildcard or self.pkg_name_match(fmri_str)): + continue + manf = manifest_func(pfmri) + fast_update_dict = {} + fast_update_res = [] + glob = self._glob + term = self._term + case_sensitive = self._case_sensitive + + if not case_sensitive: + glob = True + + search_dict = manifest.Manifest.search_dict( + manf, return_line=True, excludes=excludes + ) + for tmp in search_dict: + tok, at, st, fv = tmp + if not ( + self.action_type_wildcard or at == self.action_type + ) or not (self.key_wildcard or st == self.key): + continue + if tok not in fast_update_dict: + fast_update_dict[tok] = [] + fast_update_dict[tok].append( + (at, st, fv, fmri_str, search_dict[tmp]) ) + if glob: + keys = fast_update_dict.keys() + matches = choose(keys, term, case_sensitive) + fast_update_res = [fast_update_dict[m] for m in matches] + else: + if term in fast_update_dict: + fast_update_res.append(fast_update_dict[term]) + for sub_list in fast_update_res: + for at, st, fv, fmri_str, line_list in sub_list: + for l in line_list: + yield at, st, fmri_str, fv, l + + def _read_pkg_dirs(self, fmris): + """Legacy function used to search indexes which have a pkg + directory with fmri offset information instead of the + fmri_offsets.v1 file. 
This function is in this subclass to + translate the error from a search_error to an api_error.""" + + try: + return qp.TermQuery._read_pkg_dirs(self, fmris) + except se.InconsistentIndexException as e: + raise api_errors.InconsistentIndexException(e) + + def remove_root(self, img_root): + if ( + ( + not self.action_type_wildcard + and self.action_type != "file" + and self.action_type != "link" + and self.action_type != "hardlink" + and self.action_type != "directory" + ) + or (not self.key_wildcard and self.key != "path") + or (not self._term.startswith(img_root) or img_root == "/") + ): + return False + img_root = img_root.rstrip("/") + self._term = self._term[len(img_root) :] + self.key = "path" + self.key_wildcard = False + return True - def _search_fast_update(self, manifest_func, excludes): - """This function searches the packages which have been - installed since the last time the index was rebuilt. - - The "manifest_func" parameter is a function which maps fmris to - the path to their manifests. - - The "excludes" parameter is a list of variants defined in the - image.""" - - assert self._data_main_dict.get_file_handle() is not None - - glob = self._glob - term = self._term - case_sensitive = self._case_sensitive - - if not case_sensitive: - glob = True - - fast_update_dict = {} - - fast_update_res = [] - - # self._data_fast_add holds the names of the fmris added - # since the last time the index was rebuilt. - for fmri_str in self._data_fast_add._set: - if not (self.pkg_name_wildcard or - self.pkg_name_match(fmri_str)): - continue - f = fmri.PkgFmri(fmri_str) - path = manifest_func(f) - search_dict = manifest.Manifest.search_dict(path, - return_line=True, excludes=excludes) - for tmp in search_dict: - tok, at, st, fv = tmp - if not (self.action_type_wildcard or - at == self.action_type) or \ - not (self.key_wildcard or st == self.key): - continue - if tok not in fast_update_dict: - fast_update_dict[tok] = [] - fast_update_dict[tok].append((at, st, fv, - fmri_str, search_dict[tmp])) - if glob: - keys = fast_update_dict.keys() - matches = choose(keys, term, case_sensitive) - fast_update_res = [ - fast_update_dict[m] for m in matches - ] - - else: - if term in fast_update_dict: - fast_update_res.append(fast_update_dict[term]) - return fast_update_res - - def _get_fast_results(self, fast_update_res): - """This function transforms the output of _search_fast_update - to match that of _search_internal.""" - - for sub_list in fast_update_res: - for at, st, fv, fmri_str, line_list in sub_list: - for l in line_list: - yield at, st, fmri_str, fv, l - - def slow_search(self, fmris, manifest_func, excludes): - """This function performs search when no prebuilt index is - available. - - The "fmris" parameter is a generator function which iterates - over the packages to be searched. - - The "manifest_func" parameter is a function which maps fmris to - the path to their manifests. 
- - The "excludes" parameter is a list of variants defined in the - image.""" - - for pfmri in list(fmris()): - fmri_str = pfmri.get_fmri(anarchy=True, - include_scheme=False) - if not (self.pkg_name_wildcard or - self.pkg_name_match(fmri_str)): - continue - manf = manifest_func(pfmri) - fast_update_dict = {} - fast_update_res = [] - glob = self._glob - term = self._term - case_sensitive = self._case_sensitive - - if not case_sensitive: - glob = True - - search_dict = manifest.Manifest.search_dict(manf, - return_line=True, excludes=excludes) - for tmp in search_dict: - tok, at, st, fv = tmp - if not (self.action_type_wildcard or - at == self.action_type) or \ - not (self.key_wildcard or st == self.key): - continue - if tok not in fast_update_dict: - fast_update_dict[tok] = [] - fast_update_dict[tok].append((at, st, fv, - fmri_str, search_dict[tmp])) - if glob: - keys = fast_update_dict.keys() - matches = choose(keys, term, case_sensitive) - fast_update_res = [ - fast_update_dict[m] for m in matches - ] - else: - if term in fast_update_dict: - fast_update_res.append( - fast_update_dict[term]) - for sub_list in fast_update_res: - for at, st, fv, fmri_str, line_list in sub_list: - for l in line_list: - yield at, st, fmri_str, fv, l - - def _read_pkg_dirs(self, fmris): - """Legacy function used to search indexes which have a pkg - directory with fmri offset information instead of the - fmri_offsets.v1 file. This function is in this subclass to - translate the error from a search_error to an api_error.""" - - try: - return qp.TermQuery._read_pkg_dirs(self, fmris) - except se.InconsistentIndexException as e: - raise api_errors.InconsistentIndexException(e) - - def remove_root(self, img_root): - if (not self.action_type_wildcard and - self.action_type != "file" and - self.action_type != "link" and - self.action_type != "hardlink" and - self.action_type != "directory") or \ - (not self.key_wildcard and self.key != "path") or \ - (not self._term.startswith(img_root) or img_root == "/"): - return False - img_root = img_root.rstrip("/") - self._term = self._term[len(img_root):] - self.key = "path" - self.key_wildcard = False - return True # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/sigpolicy.py b/src/modules/client/sigpolicy.py index bb3a18f8a..73a821a12 100644 --- a/src/modules/client/sigpolicy.py +++ b/src/modules/client/sigpolicy.py @@ -28,171 +28,179 @@ import pkg.client.api_errors as apx from functools import total_ordering + @total_ordering class Policy(object): - """Abstract base Policy class. It defines the interface all subclasses - must provide. + """Abstract base Policy class. It defines the interface all subclasses + must provide. - Each subclass must also define its "strictness". - Strictness is a positive integer and is relative to the other - subclasses in existence. More than one subclass may have the same - strictness level. In the abscence of other information, when combining - two policies, the result is the stricter policy.""" + Each subclass must also define its "strictness". + Strictness is a positive integer and is relative to the other + subclasses in existence. More than one subclass may have the same + strictness level. In the abscence of other information, when combining + two policies, the result is the stricter policy.""" - _policies = {} + _policies = {} - def __init__(self, *args, **kwargs): - # This method exists to provide a consistent __init__ method - # for the factory below. 
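# A minimal sketch of the strictness model described in the Policy docstring
# above: each policy carries an integer strictness, @total_ordering derives
# the remaining comparisons from __eq__/__lt__, and combining two policies
# keeps the stricter one.  _MiniPolicy is an illustrative stand-in, not the
# real Policy hierarchy; the names and values mirror "ignore" (1) and
# "verify" (2).
from functools import total_ordering

@total_ordering
class _MiniPolicy:
    def __init__(self, name, strictness):
        self.name = name
        self.strictness = strictness

    def __eq__(self, other):
        return self.strictness == other.strictness

    def __lt__(self, other):
        return self.strictness < other.strictness

    def combine(self, other):
        # Keep whichever policy is stricter; on a tie the other operand
        # is returned, as in the real combine().
        if self > other:
            return self
        return other

assert _MiniPolicy("ignore", 1).combine(_MiniPolicy("verify", 2)).name == "verify"
assert _MiniPolicy("verify", 2).combine(_MiniPolicy("ignore", 1)).name == "verify"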
- object.__init__(self) + def __init__(self, *args, **kwargs): + # This method exists to provide a consistent __init__ method + # for the factory below. + object.__init__(self) - def process_signatures(self, sigs, acts, pub, trust_anchors, - use_crls): - """Check that the signatures ("sigs") verify against the actions - ("acts") using the publisher ("pub") as the repository for - certificates and "trust_anchors" as the dictionary of trust - anchors. + def process_signatures(self, sigs, acts, pub, trust_anchors, use_crls): + """Check that the signatures ("sigs") verify against the actions + ("acts") using the publisher ("pub") as the repository for + certificates and "trust_anchors" as the dictionary of trust + anchors. - Not implemented in the base class.""" - raise NotImplementedError() + Not implemented in the base class.""" + raise NotImplementedError() - def __lt__(self, other): - return self.strictness < other.strictness + def __lt__(self, other): + return self.strictness < other.strictness - def __eq__(self, other): - return self.strictness == other.strictness + def __eq__(self, other): + return self.strictness == other.strictness - __hash__ = None + __hash__ = None - def combine(self, other): - """If the other signature policy is more strict than this - policy, use the other policy. Otherwise, use this policy.""" + def combine(self, other): + """If the other signature policy is more strict than this + policy, use the other policy. Otherwise, use this policy.""" - if self > other: - return self - return other + if self > other: + return self + return other - def __str__(self): - return self.name + def __str__(self): + return self.name - @staticmethod - def policies(): - """Return the names of the signature policies available.""" + @staticmethod + def policies(): + """Return the names of the signature policies available.""" - return set(Policy._policies.keys()) + return set(Policy._policies.keys()) - @staticmethod - def policy_factory(name, *args, **kwargs): - """Given the name of a policy, return a new policy object of - that type.""" + @staticmethod + def policy_factory(name, *args, **kwargs): + """Given the name of a policy, return a new policy object of + that type.""" - assert name in Policy._policies - return Policy._policies[name](*args, **kwargs) + assert name in Policy._policies + return Policy._policies[name](*args, **kwargs) class Ignore(Policy): - """This policy ignores all signatures except to attempt to retrieve - any certificates that might be needed if the policy changes.""" + """This policy ignores all signatures except to attempt to retrieve + any certificates that might be needed if the policy changes.""" + + strictness = 1 + name = "ignore" - strictness = 1 - name = "ignore" + def process_signatures(self, sigs, acts, pub, trust_anchors, use_crls): + """Since this policy ignores signatures, only download the + certificates that might be needed so that they're present if + the policy changes later.""" - def process_signatures(self, sigs, acts, pub, trust_anchors, - use_crls): - """Since this policy ignores signatures, only download the - certificates that might be needed so that they're present if - the policy changes later.""" + for s in sigs: + s.retrieve_chain_certs(pub) - for s in sigs: - s.retrieve_chain_certs(pub) Policy._policies[Ignore.name] = Ignore class Verify(Policy): - """This policy verifies that all signatures present are valid but - doesn't require that a signature be present.""" + """This policy verifies that all signatures present are valid but + 
doesn't require that a signature be present.""" + + strictness = 2 + name = "verify" - strictness = 2 - name = "verify" + def process_signatures(self, sigs, acts, pub, trust_anchors, use_crls): + """Check that all signatures present are valid signatures.""" - def process_signatures(self, sigs, acts, pub, trust_anchors, - use_crls): - """Check that all signatures present are valid signatures.""" + # Ensure that acts can be iterated over repeatedly. + acts = list(acts) + for s in sigs: + s.verify_sig(acts, pub, trust_anchors, use_crls) - # Ensure that acts can be iterated over repeatedly. - acts = list(acts) - for s in sigs: - s.verify_sig(acts, pub, trust_anchors, use_crls) Policy._policies[Verify.name] = Verify + class RequireSigs(Policy): - """This policy that all signatures present are valid and insists that - at least one signature is seen with each package.""" - - strictness = 3 - name = "require-signatures" - - def process_signatures(self, sigs, acts, pub, trust_anchors, - use_crls): - """Check that all signatures present are valid signatures and - at least one signature action which has been signed with a - private key is present.""" - - # Ensure that acts can be iterated over repeatedly. - acts = list(acts) - verified = False - for s in sigs: - verified |= \ - bool(s.verify_sig(acts, pub, trust_anchors, - use_crls)) and \ - s.is_signed() - if not verified: - raise apx.RequiredSignaturePolicyException(pub) + """This policy that all signatures present are valid and insists that + at least one signature is seen with each package.""" + + strictness = 3 + name = "require-signatures" + + def process_signatures(self, sigs, acts, pub, trust_anchors, use_crls): + """Check that all signatures present are valid signatures and + at least one signature action which has been signed with a + private key is present.""" + + # Ensure that acts can be iterated over repeatedly. + acts = list(acts) + verified = False + for s in sigs: + verified |= ( + bool(s.verify_sig(acts, pub, trust_anchors, use_crls)) + and s.is_signed() + ) + if not verified: + raise apx.RequiredSignaturePolicyException(pub) + Policy._policies[RequireSigs.name] = RequireSigs class RequireNames(Policy): - """This policy that all signatures present are valid and insists that - at least one signature is seen with each package. In addition, it has - a set of names that must seen as CN's in the chain of trust.""" - - strictness = 4 - name = "require-names" - def __init__(self, req_names, *args, **kwargs): - assert req_names, "RequireNames requires at least one name " \ - "to be passed to the constructor." - Policy.__init__(self, *args, **kwargs) - if isinstance(req_names, six.string_types): - req_names = [req_names] - self.required_names = frozenset(req_names) - - def process_signatures(self, sigs, acts, pub, trust_anchors, - use_crls): - acts = list(acts) - missing_names = set(self.required_names) - verified = False - for s in sigs: - verified |= bool(s.verify_sig(acts, pub, trust_anchors, - use_crls, missing_names)) and \ - s.is_signed() - if missing_names: - raise apx.MissingRequiredNamesException(pub, - missing_names) - - def combine(self, other): - """Determines how RequireNames policies combine with another - policy. 
If the other policy is also a RequireNames policy, - the result is a policy which requires the union of both policies - required names.""" - - if self > other: - return self - if other > self: - return other - return RequireNames(self.required_names | other.required_names) + """This policy that all signatures present are valid and insists that + at least one signature is seen with each package. In addition, it has + a set of names that must seen as CN's in the chain of trust.""" + + strictness = 4 + name = "require-names" + + def __init__(self, req_names, *args, **kwargs): + assert req_names, ( + "RequireNames requires at least one name " + "to be passed to the constructor." + ) + Policy.__init__(self, *args, **kwargs) + if isinstance(req_names, six.string_types): + req_names = [req_names] + self.required_names = frozenset(req_names) + + def process_signatures(self, sigs, acts, pub, trust_anchors, use_crls): + acts = list(acts) + missing_names = set(self.required_names) + verified = False + for s in sigs: + verified |= ( + bool( + s.verify_sig( + acts, pub, trust_anchors, use_crls, missing_names + ) + ) + and s.is_signed() + ) + if missing_names: + raise apx.MissingRequiredNamesException(pub, missing_names) + + def combine(self, other): + """Determines how RequireNames policies combine with another + policy. If the other policy is also a RequireNames policy, + the result is a policy which requires the union of both policies + required names.""" + + if self > other: + return self + if other > self: + return other + return RequireNames(self.required_names | other.required_names) + Policy._policies[RequireNames.name] = RequireNames @@ -200,4 +208,4 @@ def combine(self, other): # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/transport/__init__.py b/src/modules/client/transport/__init__.py index a0c52dee8..8c2fb45b9 100644 --- a/src/modules/client/transport/__init__.py +++ b/src/modules/client/transport/__init__.py @@ -24,8 +24,8 @@ # Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. # -__all__ = [ "transport" ] +__all__ = ["transport"] # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/transport/engine.py b/src/modules/client/transport/engine.py index 08e9674b5..75b5fbdd3 100644 --- a/src/modules/client/transport/engine.py +++ b/src/modules/client/transport/engine.py @@ -38,1150 +38,1270 @@ # Need to ignore SIGPIPE if using pycurl in NOSIGNAL mode. try: - import signal - if hasattr(signal, "SIGPIPE"): - signal.signal(signal.SIGPIPE, signal.SIG_IGN) + import signal + + if hasattr(signal, "SIGPIPE"): + signal.signal(signal.SIGPIPE, signal.SIG_IGN) except ImportError: - pass + pass -import pkg.client.api_errors as api_errors -import pkg.client.transport.exception as tx -import pkg.client.transport.fileobj as fileobj -import pkg.misc as misc +import pkg.client.api_errors as api_errors +import pkg.client.transport.exception as tx +import pkg.client.transport.fileobj as fileobj +import pkg.misc as misc -from collections import deque -from pkg.client import global_settings +from collections import deque +from pkg.client import global_settings from pkg.client.debugvalues import DebugValues pipelined_protocols = () response_protocols = ("ftp", "http", "https") + class TransportEngine(object): - """This is an abstract class. It shouldn't implement any - of the methods that it contains. 
Leave that to transport-specific - implementations.""" + """This is an abstract class. It shouldn't implement any + of the methods that it contains. Leave that to transport-specific + implementations.""" class CurlTransportEngine(TransportEngine): - """Concrete class of TransportEngine for libcurl transport.""" - - def __init__(self, transport, max_conn=20): - - # Backpointer to transport object - self.__xport = transport - # Curl handles - self.__mhandle = pycurl.CurlMulti() - self.__chandles = [] - self.__active_handles = 0 - self.__max_handles = max_conn - # Request queue - self.__req_q = deque() - # List of failures - self.__failures = [] - # List of URLs successfully transferred - self.__success = [] - # List of Orphaned URLs. - self.__orphans = set() - # Set default file buffer size at 128k, callers override - # this setting after looking at VFS block size. - self.__file_bufsz = 131072 - # Header bits and pieces - self.__user_agent = None - self.__common_header = {} - self.__last_stall_check = 0 - - # Set options on multi-handle - self.__mhandle.setopt(pycurl.M_PIPELINING, 0) - - # initialize easy handles - for i in range(self.__max_handles): - eh = pycurl.Curl() - eh.url = None - eh.repourl = None - eh.fobj = None - eh.r_fobj = None - eh.filepath = None - eh.success = False - eh.fileprog = None - eh.filetime = -1 - eh.starttime = -1 - eh.uuid = None - self.__chandles.append(eh) - - # copy handles into handle freelist - self.__freehandles = self.__chandles[:] - - def __call_perform(self): - """An internal method that invokes the multi-handle's - perform method.""" - - while 1: - ret, active_handles = self.__mhandle.perform() - if ret != pycurl.E_CALL_MULTI_PERFORM: - break - - self.__active_handles = active_handles - return ret - - def add_url(self, url, filepath=None, writefunc=None, header=None, - progclass=None, progtrack=None, sslcert=None, sslkey=None, - repourl=None, compressible=False, failonerror=True, proxy=None, - runtime_proxy=None): - """Add a URL to the transport engine. Caller must supply - either a filepath where the file should be downloaded, - or a callback to a function that will peform the write. - It may also optionally supply header information - in a dictionary. If the caller has a ProgressTracker, - it should pass the tracker in progtrack. The caller should - also supply a class that wraps the tracker in progclass. - - 'proxy' is the persistent proxy value for this url and is - stored as part of the transport stats accounting. - - 'runtime_proxy' is the actual proxy value that is used by pycurl - to retrieve this resource.""" - - t = TransportRequest(url, filepath=filepath, - writefunc=writefunc, header=header, progclass=progclass, - progtrack=progtrack, sslcert=sslcert, sslkey=sslkey, - repourl=repourl, compressible=compressible, - failonerror=failonerror, proxy=proxy, - runtime_proxy=runtime_proxy) - - self.__req_q.appendleft(t) - - def __check_for_stalls(self): - """In some situations, libcurl can get itself - tied in a knot, and fail to make progress. Check that the - active handles are making progress. If none of the active - handles have downloaded any content for the timeout period, - reset the transport and generate exceptions for the failed - requests.""" - - timeout = global_settings.PKG_CLIENT_LOWSPEED_TIMEOUT - if timeout == 0: - return - current_time = time.time() - time_list = [] - size_list = [] - failures = [] - q_hdls = [ - hdl for hdl in self.__chandles - if hdl not in self.__freehandles - ] - - # time.time() is based upon system clock. 
Check that - # our time hasn't been set backwards. If time is set forward, - # we'll have to expire the handles. There's no way to detect - # this until python properly implements gethrtime(). Solaris - # implementations of time.clock() appear broken. - - for h in q_hdls: - time_elapsed = current_time - h.starttime - if time_elapsed < 0: - h.starttime = current_time - time_elapsed = 0 - size_xfrd = h.getinfo(pycurl.SIZE_DOWNLOAD) + \ - h.getinfo(pycurl.SIZE_UPLOAD) - time_list.append(time_elapsed) - size_list.append(size_xfrd) - - # If timeout is smaller than smallest elapsed time, - # and no data has been transferred, abort. - if timeout < min(time_list) and max(size_list) == 0: - for h in q_hdls: - url = h.url - uuid = h.uuid - urlstem = h.repourl - ex = tx.TransportStallError(url, - repourl=urlstem, uuid=uuid) - - self.__mhandle.remove_handle(h) - self.__teardown_handle(h) - self.__freehandles.append(h) - - failures.append(ex) - - self.__failures.extend(failures) - - - def __cleanup_requests(self): - """Cleanup handles that have finished their request. - Return the handles to the freelist. Generate any - relevant error information.""" - - count, good, bad = self.__mhandle.info_read() - failures = self.__failures - success = self.__success - done_handles = [] - ex_to_raise = None - visited_repos = set() - errors_seen = 0 - - for h, en, em in bad: - # Get statistics for each handle. - # As new properties are added to URIs that differentiate - # them, the tuple used to index the __xport.stats entry - # should also include those properties so that we can - # track statistics uniquely for each RepoURI. That is, - # the format of the keys of the __xport.stats dictionary - # should match the one generated by - # pkg.client.publisher.TransportRepoURI.key() - repostats = self.__xport.stats[(h.repourl, h.proxy)] - visited_repos.add(repostats) - repostats.record_tx() - nbytes = h.getinfo(pycurl.SIZE_DOWNLOAD) - seconds = h.getinfo(pycurl.TOTAL_TIME) - conn_count = h.getinfo(pycurl.NUM_CONNECTS) - conn_time = h.getinfo(pycurl.CONNECT_TIME) - - url = h.url - uuid = h.uuid - urlstem = h.repourl - proto = urlsplit(url)[0] - - # When using pipelined operations, libcurl tracks the - # amount of time taken for the entire pipelined request - # as opposed to just the amount of time for a single - # file in the pipeline. So, if the connection time is 0 - # for a request using http(s), then it was pipelined and - # the total time must be obtained by subtracting the - # time the transfer of the individual request started - # from the total time. - if conn_time == 0 and proto in pipelined_protocols: - # Only performing this subtraction when the - # conn_time is 0 allows the first request in - # the pipeline to properly include connection - # time, etc. to initiate the transfer. - seconds -= h.getinfo(pycurl.STARTTRANSFER_TIME) - elif conn_time > 0: - seconds -= conn_time - - # Sometimes libcurl will report no transfer time. - # In that case, just use starttransfer time if it's - # non-zero. - if seconds < 0: - seconds = h.getinfo(pycurl.STARTTRANSFER_TIME) - - repostats.record_progress(nbytes, seconds) - - # Only count connections if the connection time is - # positive for http(s); for all other protocols, - # record the connection regardless. - if conn_count > 0 and conn_time > 0: - repostats.record_connection(conn_time) - - respcode = h.getinfo(pycurl.RESPONSE_CODE) - - # If we were cancelled, raise an API error. - # Otherwise fall through to transport's exception - # generation. 
- if en == pycurl.E_ABORTED_BY_CALLBACK: - ex = None - ex_to_raise = api_errors.CanceledException - elif en in (pycurl.E_HTTP_RETURNED_ERROR, - pycurl.E_FILE_COULDNT_READ_FILE): - # E_HTTP_RETURNED_ERROR is only used for http:// - # and https://, but a more specific reason for - # failure can be obtained from respcode. - # - # E_FILE_COULDNT_READ_FILE is only used for - # file://, but unfortunately can mean ENOENT, - # EPERM, etc. and libcurl doesn't differentiate - # or provide a respcode. - if proto not in response_protocols: - # For protocols that don't provide a - # pycurl.RESPONSE_CODE, use the - # pycurl error number instead. - respcode = en - proto_reason = None - if proto in tx.proto_code_map: - # Look up protocol error code map - # from transport exception's table. - pmap = tx.proto_code_map[proto] - if respcode in pmap: - proto_reason = pmap[respcode] - ex = tx.TransportProtoError(proto, respcode, - url, reason=proto_reason, repourl=urlstem, - uuid=uuid) - repostats.record_error(decayable=ex.decayable) - errors_seen += 1 - else: - timeout = en == pycurl.E_OPERATION_TIMEOUTED - ex = tx.TransportFrameworkError(en, url, em, - repourl=urlstem, uuid=uuid) - repostats.record_error(decayable=ex.decayable, - timeout=timeout) - errors_seen += 1 - - if ex and ex.retryable: - failures.append(ex) - elif ex and not ex_to_raise: - ex_to_raise = ex - - done_handles.append(h) - - for h in good: - # Get statistics for each handle. - repostats = self.__xport.stats[(h.repourl, h.proxy)] - visited_repos.add(repostats) - repostats.record_tx() - nbytes = h.getinfo(pycurl.SIZE_DOWNLOAD) - seconds = h.getinfo(pycurl.TOTAL_TIME) - conn_count = h.getinfo(pycurl.NUM_CONNECTS) - conn_time = h.getinfo(pycurl.CONNECT_TIME) - h.filetime = h.getinfo(pycurl.INFO_FILETIME) - - url = h.url - uuid = h.uuid - urlstem = h.repourl - proto = urlsplit(url)[0] - - # When using pipelined operations, libcurl tracks the - # amount of time taken for the entire pipelined request - # as opposed to just the amount of time for a single - # file in the pipeline. So, if the connection time is 0 - # for a request using http(s), then it was pipelined and - # the total time must be obtained by subtracting the - # time the transfer of the individual request started - # from the total time. - if conn_time == 0 and proto in pipelined_protocols: - # Only performing this subtraction when the - # conn_time is 0 allows the first request in - # the pipeline to properly include connection - # time, etc. to initiate the transfer and - # the correct calculations of bytespersec. - seconds -= h.getinfo(pycurl.STARTTRANSFER_TIME) - elif conn_time > 0: - seconds -= conn_time - - if seconds > 0: - bytespersec = nbytes // seconds - else: - bytespersec = 0 - - # If a request ahead of a successful request fails due - # to a timeout, sometimes libcurl will report impossibly - # large total time values. In this case, check that the - # nbytes/sec exceeds our minimum threshold. If it does - # not, and the total time is longer than our timeout, - # discard the time calculation as it is bogus. - if (bytespersec < - global_settings.pkg_client_lowspeed_limit) and ( - seconds > - global_settings.PKG_CLIENT_LOWSPEED_TIMEOUT): - nbytes = 0 - seconds = 0 - repostats.record_progress(nbytes, seconds) - - # Only count connections if the connection time is - # positive for http(s); for all other protocols, - # record the connection regardless. 
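# A small sketch of the per-transfer time accounting described in the
# comments above: for pipelined http(s) requests the connection time is 0
# and the start-transfer time is subtracted, otherwise the connection time
# is subtracted; the resulting rate is then sanity-checked against the
# low-speed limit and timeout.  Plain numbers stand in for the pycurl
# getinfo() values, and the function and parameter names are illustrative.
def effective_seconds(total_time, conn_time, starttransfer_time, pipelined):
    if conn_time == 0 and pipelined:
        seconds = total_time - starttransfer_time
    elif conn_time > 0:
        seconds = total_time - conn_time
    else:
        seconds = total_time
    # libcurl occasionally reports no transfer time at all.
    if seconds < 0:
        seconds = starttransfer_time
    return seconds

def sane_progress(nbytes, seconds, lowspeed_limit, lowspeed_timeout):
    rate = nbytes // seconds if seconds > 0 else 0
    if rate < lowspeed_limit and seconds > lowspeed_timeout:
        return 0, 0          # impossibly slow and long: discard as bogus
    return nbytes, seconds

assert effective_seconds(5.0, 0, 4.5, pipelined=True) == 0.5
assert sane_progress(1024, 120.0, 1024, 30) == (0, 0)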
- if conn_count > 0 and conn_time > 0: - repostats.record_connection(conn_time) - - respcode = h.getinfo(pycurl.RESPONSE_CODE) - - if proto not in response_protocols or \ - respcode == http_client.OK: - h.success = True - repostats.clear_consecutive_errors() - success.append(url) - else: - proto_reason = None - if proto in tx.proto_code_map: - # Look up protocol error code map - # from transport exception's table. - pmap = tx.proto_code_map[proto] - if respcode in pmap: - proto_reason = pmap[respcode] - ex = tx.TransportProtoError(proto, - respcode, url, reason=proto_reason, - repourl=urlstem, uuid=uuid) - - # If code >= 400, record this as an error. - # Handlers above the engine get to decide - # for 200/300 codes that aren't OK - if respcode >= 400: - repostats.record_error( - decayable=ex.decayable) - errors_seen += 1 - # If code == 0, libcurl failed to read - # any HTTP status. Response is almost - # certainly corrupted. - elif respcode == 0: - repostats.record_error() - errors_seen += 1 - reason = "Invalid HTTP status code " \ - "from server" - ex = tx.TransportProtoError(proto, - url=url, reason=reason, - repourl=urlstem, uuid=uuid) - ex.retryable = True - - # Stash retryable failures, arrange - # to raise first fatal error after - # cleanup. - if ex.retryable: - failures.append(ex) - elif not ex_to_raise: - ex_to_raise = ex - - done_handles.append(h) - - # Call to remove_handle must be separate from info_read() - for h in done_handles: - self.__mhandle.remove_handle(h) - self.__teardown_handle(h) - self.__freehandles.append(h) - - self.__failures = failures - self.__success = success - - if ex_to_raise: - raise ex_to_raise - - # Don't bother to check the transient error count if no errors - # were encountered in this transaction. - if errors_seen == 0: - return - - # If errors were encountered, but no exception raised, - # check if the maximum number of transient failures has - # been exceeded at any of the endpoints that were visited - # during this transaction. - for rs in visited_repos: - numce = rs.consecutive_errors - if numce >= \ - global_settings.PKG_CLIENT_MAX_CONSECUTIVE_ERROR: - # Reset consecutive error count before raising - # this exception. - rs.clear_consecutive_errors() - raise tx.ExcessiveTransientFailure(rs.url, - numce) - - def check_status(self, urllist=None, good_reqs=False): - """Return information about retryable failures that occured - during the request. - - This is a list of transport exceptions. Caller - may raise these, or process them for failure information. - - Urllist is an optional argument to return only failures - for a specific URLs. Not all callers of check status - want to claim the error state of all pending transactions. - - Transient errors are part of standard control flow. - The caller will look at these and decide whether - to throw them or not. Permanent failures are raised - by the transport engine as soon as they occur. - - If good_reqs is set to true, then check_stats will - return a tuple of lists, the first list contains the - transient errors that were encountered, the second list - contains successfully transferred urls. Because the - list of successfully transferred URLs may be long, - it is discarded if not requested by the caller.""" - - # if list not specified, return all failures - if not urllist: - rf = self.__failures - rs = self.__success - self.__failures = [] - self.__success = [] - - if good_reqs: - return rf, rs - - return rf - - # otherwise, look for failures that match just the URLs - # in urllist. 
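# A small sketch of the urllist filtering performed by check_status() above:
# only failures whose .url appears in the caller's list are claimed (and
# removed from the pending failure list); everything else stays queued for
# other callers.  SimpleNamespace objects stand in for the real transport
# exceptions and the URLs are illustrative.
from types import SimpleNamespace

failures = [SimpleNamespace(url=u) for u in ("http://a/f1", "http://b/f2")]
urllist = ["http://a/f1"]

claimed = [f for f in failures if hasattr(f, "url") and f.url in urllist]
for f in claimed:
    failures.remove(f)       # removed in a separate pass, as in the engine

assert [f.url for f in claimed] == ["http://a/f1"]
assert [f.url for f in failures] == ["http://b/f2"]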
- rf = [ - tf - for tf in self.__failures - if hasattr(tf, "url") and tf.url in urllist - ] - - # remove failues in separate pass, or else for loop gets - # confused. - for f in rf: - self.__failures.remove(f) - - if not good_reqs: - self.__success = [] - return rf - - rs = [] - - for ts in self.__success: - if ts in urllist: - rs.append(ts) - - for s in rs: - self.__success.remove(s) - + """Concrete class of TransportEngine for libcurl transport.""" + + def __init__(self, transport, max_conn=20): + # Backpointer to transport object + self.__xport = transport + # Curl handles + self.__mhandle = pycurl.CurlMulti() + self.__chandles = [] + self.__active_handles = 0 + self.__max_handles = max_conn + # Request queue + self.__req_q = deque() + # List of failures + self.__failures = [] + # List of URLs successfully transferred + self.__success = [] + # List of Orphaned URLs. + self.__orphans = set() + # Set default file buffer size at 128k, callers override + # this setting after looking at VFS block size. + self.__file_bufsz = 131072 + # Header bits and pieces + self.__user_agent = None + self.__common_header = {} + self.__last_stall_check = 0 + + # Set options on multi-handle + self.__mhandle.setopt(pycurl.M_PIPELINING, 0) + + # initialize easy handles + for i in range(self.__max_handles): + eh = pycurl.Curl() + eh.url = None + eh.repourl = None + eh.fobj = None + eh.r_fobj = None + eh.filepath = None + eh.success = False + eh.fileprog = None + eh.filetime = -1 + eh.starttime = -1 + eh.uuid = None + self.__chandles.append(eh) + + # copy handles into handle freelist + self.__freehandles = self.__chandles[:] + + def __call_perform(self): + """An internal method that invokes the multi-handle's + perform method.""" + + while 1: + ret, active_handles = self.__mhandle.perform() + if ret != pycurl.E_CALL_MULTI_PERFORM: + break + + self.__active_handles = active_handles + return ret + + def add_url( + self, + url, + filepath=None, + writefunc=None, + header=None, + progclass=None, + progtrack=None, + sslcert=None, + sslkey=None, + repourl=None, + compressible=False, + failonerror=True, + proxy=None, + runtime_proxy=None, + ): + """Add a URL to the transport engine. Caller must supply + either a filepath where the file should be downloaded, + or a callback to a function that will peform the write. + It may also optionally supply header information + in a dictionary. If the caller has a ProgressTracker, + it should pass the tracker in progtrack. The caller should + also supply a class that wraps the tracker in progclass. + + 'proxy' is the persistent proxy value for this url and is + stored as part of the transport stats accounting. + + 'runtime_proxy' is the actual proxy value that is used by pycurl + to retrieve this resource.""" + + t = TransportRequest( + url, + filepath=filepath, + writefunc=writefunc, + header=header, + progclass=progclass, + progtrack=progtrack, + sslcert=sslcert, + sslkey=sslkey, + repourl=repourl, + compressible=compressible, + failonerror=failonerror, + proxy=proxy, + runtime_proxy=runtime_proxy, + ) + + self.__req_q.appendleft(t) + + def __check_for_stalls(self): + """In some situations, libcurl can get itself + tied in a knot, and fail to make progress. Check that the + active handles are making progress. 
If none of the active + handles have downloaded any content for the timeout period, + reset the transport and generate exceptions for the failed + requests.""" + + timeout = global_settings.PKG_CLIENT_LOWSPEED_TIMEOUT + if timeout == 0: + return + current_time = time.time() + time_list = [] + size_list = [] + failures = [] + q_hdls = [ + hdl for hdl in self.__chandles if hdl not in self.__freehandles + ] + + # time.time() is based upon system clock. Check that + # our time hasn't been set backwards. If time is set forward, + # we'll have to expire the handles. There's no way to detect + # this until python properly implements gethrtime(). Solaris + # implementations of time.clock() appear broken. + + for h in q_hdls: + time_elapsed = current_time - h.starttime + if time_elapsed < 0: + h.starttime = current_time + time_elapsed = 0 + size_xfrd = h.getinfo(pycurl.SIZE_DOWNLOAD) + h.getinfo( + pycurl.SIZE_UPLOAD + ) + time_list.append(time_elapsed) + size_list.append(size_xfrd) + + # If timeout is smaller than smallest elapsed time, + # and no data has been transferred, abort. + if timeout < min(time_list) and max(size_list) == 0: + for h in q_hdls: + url = h.url + uuid = h.uuid + urlstem = h.repourl + ex = tx.TransportStallError(url, repourl=urlstem, uuid=uuid) + + self.__mhandle.remove_handle(h) + self.__teardown_handle(h) + self.__freehandles.append(h) + + failures.append(ex) + + self.__failures.extend(failures) + + def __cleanup_requests(self): + """Cleanup handles that have finished their request. + Return the handles to the freelist. Generate any + relevant error information.""" + + count, good, bad = self.__mhandle.info_read() + failures = self.__failures + success = self.__success + done_handles = [] + ex_to_raise = None + visited_repos = set() + errors_seen = 0 + + for h, en, em in bad: + # Get statistics for each handle. + # As new properties are added to URIs that differentiate + # them, the tuple used to index the __xport.stats entry + # should also include those properties so that we can + # track statistics uniquely for each RepoURI. That is, + # the format of the keys of the __xport.stats dictionary + # should match the one generated by + # pkg.client.publisher.TransportRepoURI.key() + repostats = self.__xport.stats[(h.repourl, h.proxy)] + visited_repos.add(repostats) + repostats.record_tx() + nbytes = h.getinfo(pycurl.SIZE_DOWNLOAD) + seconds = h.getinfo(pycurl.TOTAL_TIME) + conn_count = h.getinfo(pycurl.NUM_CONNECTS) + conn_time = h.getinfo(pycurl.CONNECT_TIME) + + url = h.url + uuid = h.uuid + urlstem = h.repourl + proto = urlsplit(url)[0] + + # When using pipelined operations, libcurl tracks the + # amount of time taken for the entire pipelined request + # as opposed to just the amount of time for a single + # file in the pipeline. So, if the connection time is 0 + # for a request using http(s), then it was pipelined and + # the total time must be obtained by subtracting the + # time the transfer of the individual request started + # from the total time. + if conn_time == 0 and proto in pipelined_protocols: + # Only performing this subtraction when the + # conn_time is 0 allows the first request in + # the pipeline to properly include connection + # time, etc. to initiate the transfer. + seconds -= h.getinfo(pycurl.STARTTRANSFER_TIME) + elif conn_time > 0: + seconds -= conn_time + + # Sometimes libcurl will report no transfer time. + # In that case, just use starttransfer time if it's + # non-zero. 
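# A minimal sketch of the stall test in __check_for_stalls() above: if every
# active handle has been running longer than the low-speed timeout and none
# of them has transferred any data, the whole batch is treated as stalled.
# The (elapsed seconds, bytes transferred) tuples stand in for the values
# read from the active handles.
def is_stalled(handles, timeout):
    if timeout == 0 or not handles:
        return False
    times = [t for t, _ in handles]
    sizes = [s for _, s in handles]
    return timeout < min(times) and max(sizes) == 0

assert is_stalled([(45.0, 0), (60.0, 0)], timeout=30) is True
assert is_stalled([(45.0, 0), (60.0, 4096)], timeout=30) is False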
+ if seconds < 0: + seconds = h.getinfo(pycurl.STARTTRANSFER_TIME) + + repostats.record_progress(nbytes, seconds) + + # Only count connections if the connection time is + # positive for http(s); for all other protocols, + # record the connection regardless. + if conn_count > 0 and conn_time > 0: + repostats.record_connection(conn_time) + + respcode = h.getinfo(pycurl.RESPONSE_CODE) + + # If we were cancelled, raise an API error. + # Otherwise fall through to transport's exception + # generation. + if en == pycurl.E_ABORTED_BY_CALLBACK: + ex = None + ex_to_raise = api_errors.CanceledException + elif en in ( + pycurl.E_HTTP_RETURNED_ERROR, + pycurl.E_FILE_COULDNT_READ_FILE, + ): + # E_HTTP_RETURNED_ERROR is only used for http:// + # and https://, but a more specific reason for + # failure can be obtained from respcode. + # + # E_FILE_COULDNT_READ_FILE is only used for + # file://, but unfortunately can mean ENOENT, + # EPERM, etc. and libcurl doesn't differentiate + # or provide a respcode. + if proto not in response_protocols: + # For protocols that don't provide a + # pycurl.RESPONSE_CODE, use the + # pycurl error number instead. + respcode = en + proto_reason = None + if proto in tx.proto_code_map: + # Look up protocol error code map + # from transport exception's table. + pmap = tx.proto_code_map[proto] + if respcode in pmap: + proto_reason = pmap[respcode] + ex = tx.TransportProtoError( + proto, + respcode, + url, + reason=proto_reason, + repourl=urlstem, + uuid=uuid, + ) + repostats.record_error(decayable=ex.decayable) + errors_seen += 1 + else: + timeout = en == pycurl.E_OPERATION_TIMEOUTED + ex = tx.TransportFrameworkError( + en, url, em, repourl=urlstem, uuid=uuid + ) + repostats.record_error(decayable=ex.decayable, timeout=timeout) + errors_seen += 1 + + if ex and ex.retryable: + failures.append(ex) + elif ex and not ex_to_raise: + ex_to_raise = ex + + done_handles.append(h) + + for h in good: + # Get statistics for each handle. + repostats = self.__xport.stats[(h.repourl, h.proxy)] + visited_repos.add(repostats) + repostats.record_tx() + nbytes = h.getinfo(pycurl.SIZE_DOWNLOAD) + seconds = h.getinfo(pycurl.TOTAL_TIME) + conn_count = h.getinfo(pycurl.NUM_CONNECTS) + conn_time = h.getinfo(pycurl.CONNECT_TIME) + h.filetime = h.getinfo(pycurl.INFO_FILETIME) + + url = h.url + uuid = h.uuid + urlstem = h.repourl + proto = urlsplit(url)[0] + + # When using pipelined operations, libcurl tracks the + # amount of time taken for the entire pipelined request + # as opposed to just the amount of time for a single + # file in the pipeline. So, if the connection time is 0 + # for a request using http(s), then it was pipelined and + # the total time must be obtained by subtracting the + # time the transfer of the individual request started + # from the total time. + if conn_time == 0 and proto in pipelined_protocols: + # Only performing this subtraction when the + # conn_time is 0 allows the first request in + # the pipeline to properly include connection + # time, etc. to initiate the transfer and + # the correct calculations of bytespersec. + seconds -= h.getinfo(pycurl.STARTTRANSFER_TIME) + elif conn_time > 0: + seconds -= conn_time + + if seconds > 0: + bytespersec = nbytes // seconds + else: + bytespersec = 0 + + # If a request ahead of a successful request fails due + # to a timeout, sometimes libcurl will report impossibly + # large total time values. In this case, check that the + # nbytes/sec exceeds our minimum threshold. 
If it does + # not, and the total time is longer than our timeout, + # discard the time calculation as it is bogus. + if (bytespersec < global_settings.pkg_client_lowspeed_limit) and ( + seconds > global_settings.PKG_CLIENT_LOWSPEED_TIMEOUT + ): + nbytes = 0 + seconds = 0 + repostats.record_progress(nbytes, seconds) + + # Only count connections if the connection time is + # positive for http(s); for all other protocols, + # record the connection regardless. + if conn_count > 0 and conn_time > 0: + repostats.record_connection(conn_time) + + respcode = h.getinfo(pycurl.RESPONSE_CODE) + + if proto not in response_protocols or respcode == http_client.OK: + h.success = True + repostats.clear_consecutive_errors() + success.append(url) + else: + proto_reason = None + if proto in tx.proto_code_map: + # Look up protocol error code map + # from transport exception's table. + pmap = tx.proto_code_map[proto] + if respcode in pmap: + proto_reason = pmap[respcode] + ex = tx.TransportProtoError( + proto, + respcode, + url, + reason=proto_reason, + repourl=urlstem, + uuid=uuid, + ) + + # If code >= 400, record this as an error. + # Handlers above the engine get to decide + # for 200/300 codes that aren't OK + if respcode >= 400: + repostats.record_error(decayable=ex.decayable) + errors_seen += 1 + # If code == 0, libcurl failed to read + # any HTTP status. Response is almost + # certainly corrupted. + elif respcode == 0: + repostats.record_error() + errors_seen += 1 + reason = "Invalid HTTP status code " "from server" + ex = tx.TransportProtoError( + proto, + url=url, + reason=reason, + repourl=urlstem, + uuid=uuid, + ) + ex.retryable = True + + # Stash retryable failures, arrange + # to raise first fatal error after + # cleanup. + if ex.retryable: + failures.append(ex) + elif not ex_to_raise: + ex_to_raise = ex + + done_handles.append(h) + + # Call to remove_handle must be separate from info_read() + for h in done_handles: + self.__mhandle.remove_handle(h) + self.__teardown_handle(h) + self.__freehandles.append(h) + + self.__failures = failures + self.__success = success + + if ex_to_raise: + raise ex_to_raise + + # Don't bother to check the transient error count if no errors + # were encountered in this transaction. + if errors_seen == 0: + return + + # If errors were encountered, but no exception raised, + # check if the maximum number of transient failures has + # been exceeded at any of the endpoints that were visited + # during this transaction. + for rs in visited_repos: + numce = rs.consecutive_errors + if numce >= global_settings.PKG_CLIENT_MAX_CONSECUTIVE_ERROR: + # Reset consecutive error count before raising + # this exception. + rs.clear_consecutive_errors() + raise tx.ExcessiveTransientFailure(rs.url, numce) + + def check_status(self, urllist=None, good_reqs=False): + """Return information about retryable failures that occured + during the request. + + This is a list of transport exceptions. Caller + may raise these, or process them for failure information. + + Urllist is an optional argument to return only failures + for a specific URLs. Not all callers of check status + want to claim the error state of all pending transactions. + + Transient errors are part of standard control flow. + The caller will look at these and decide whether + to throw them or not. Permanent failures are raised + by the transport engine as soon as they occur. 
+ + If good_reqs is set to true, then check_stats will + return a tuple of lists, the first list contains the + transient errors that were encountered, the second list + contains successfully transferred urls. Because the + list of successfully transferred URLs may be long, + it is discarded if not requested by the caller.""" + + # if list not specified, return all failures + if not urllist: + rf = self.__failures + rs = self.__success + self.__failures = [] + self.__success = [] + + if good_reqs: return rf, rs - def get_url(self, url, header=None, sslcert=None, sslkey=None, - repourl=None, compressible=False, ccancel=None, - failonerror=True, proxy=None, runtime_proxy=None, system=False): - """Invoke the engine to retrieve a single URL. Callers - wishing to obtain multiple URLs at once should use - addUrl() and run(). - - getUrl will return a read-only file object that allows access - to the URL's data. - - 'proxy' is the persistent proxy value for this url and is - stored as part of the transport stats accounting. - - 'runtime_proxy' is the actual proxy value that is used by pycurl - to retrieve this resource. - - 'system' whether the resource is being retrieved on behalf of - a system-publisher or directly from the system-repository. - """ - - fobj = fileobj.StreamingFileObj(url, self, ccancel=ccancel) - progfunc = None - - if ccancel: - progfunc = fobj.get_progress_func() - - t = TransportRequest(url, writefunc=fobj.get_write_func(), - hdrfunc=fobj.get_header_func(), header=header, - sslcert=sslcert, sslkey=sslkey, repourl=repourl, - compressible=compressible, progfunc=progfunc, - uuid=fobj.uuid, failonerror=failonerror, proxy=proxy, - runtime_proxy=runtime_proxy, system=system) - - self.__req_q.appendleft(t) - - return fobj - - def get_url_header(self, url, header=None, sslcert=None, sslkey=None, - repourl=None, ccancel=None, failonerror=True, proxy=None, - runtime_proxy=None): - """Invoke the engine to retrieve a single URL's headers. - - getUrlHeader will return a read-only file object that - contains no data. - - 'proxy' is the persistent proxy value for this url and is - stored as part of the transport stats accounting. - - 'runtime_proxy' is the actual proxy value that is used by pycurl - to retrieve this resource. - """ - - fobj = fileobj.StreamingFileObj(url, self, ccancel=ccancel) - progfunc = None - - if ccancel: - progfunc = fobj.get_progress_func() - - t = TransportRequest(url, writefunc=fobj.get_write_func(), - hdrfunc=fobj.get_header_func(), header=header, - httpmethod="HEAD", sslcert=sslcert, sslkey=sslkey, - repourl=repourl, progfunc=progfunc, uuid=fobj.uuid, - failonerror=failonerror, proxy=proxy, - runtime_proxy=runtime_proxy) - - self.__req_q.appendleft(t) - - return fobj - - @property - def pending(self): - """Returns true if the engine still has outstanding - work to perform, false otherwise.""" - - return bool(self.__req_q) or self.__active_handles > 0 - - def run(self): - """Run the transport engine. This polls the underlying - framework to complete any asynchronous I/O. Synchronous - operations should have completed when startRequest - was invoked.""" - - if not self.pending: - return - - if self.__active_handles > 0: - # timeout returned in milliseconds - timeout = self.__mhandle.timeout() - if timeout == -1: - # Pick our own timeout. - timeout = 1.0 - elif timeout > 0: - # Timeout of 0 means skip call - # to select. - # - # Convert from milliseconds to seconds. 
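# A small sketch of the select-timeout handling in run() above: libcurl
# reports its preferred timeout in milliseconds, -1 means "pick your own",
# and 0 means the select() call should be skipped.  The helper name is
# illustrative.
def select_timeout(multi_timeout_ms):
    if multi_timeout_ms == -1:
        return 1.0                     # no hint from libcurl; use one second
    if multi_timeout_ms == 0:
        return None                    # skip the select() call entirely
    return multi_timeout_ms / 1000.0   # milliseconds -> seconds

assert select_timeout(-1) == 1.0
assert select_timeout(0) is None
assert select_timeout(250) == 0.25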
- timeout = timeout / 1000.0 - - if timeout: - self.__mhandle.select(timeout) - - # If object deletion has given the transport engine orphaned - # requests to purge, do this first, in case the cleanup yields - # free handles. - while self.__orphans: - url, uuid = self.__orphans.pop() - self.remove_request(url, uuid) - - while self.__freehandles and self.__req_q: - t = self.__req_q.pop() - eh = self.__freehandles.pop(-1) - self.__setup_handle(eh, t) - self.__mhandle.add_handle(eh) - - self.__call_perform() - - self.__cleanup_requests() - - if self.__active_handles and (not self.__freehandles or not - self.__req_q): - cur_clock = time.time() - if cur_clock - self.__last_stall_check > 1: - self.__last_stall_check = cur_clock - self.__check_for_stalls() - elif cur_clock - self.__last_stall_check < 0: - self.__last_stall_check = cur_clock - self.__check_for_stalls() - - def orphaned_request(self, url, uuid): - """Add the URL to the list of orphaned requests. Any URL in - list will be removed from the transport next time run() is - invoked. This is used by the fileobj's __del__ method - to prevent unintended modifications to transport state - when StreamingFileObjs that aren't close()'d get cleaned - up.""" - - self.__orphans.add((url, uuid)) - - def remove_request(self, url, uuid): - """In order to remove a request, it may be necessary - to walk all of the items in the request queue, all of the - currently active handles, and the list of any transient - failures. This is expensive, so only remove a request - if absolutely necessary.""" - - for h in self.__chandles: - if h.url == url and h.uuid == uuid and \ - h not in self.__freehandles: - try: - self.__mhandle.remove_handle(h) - except pycurl.error: - # If cleanup is interrupted, it's - # possible that a handle was removed but - # not placed in freelist. In that case, - # finish cleanup and appened to - # freehandles. - pass - self.__teardown_handle(h) - self.__freehandles.append(h) - return - - for i, t in enumerate(self.__req_q): - if t.url == url and t.uuid == uuid: - del self.__req_q[i] - return - - for ex in self.__failures: - if ex.url == url and ex.uuid == uuid: - self.__failures.remove(ex) - return - - def reset(self): - """Reset the state of the transport engine. Do this - before performing another type of request.""" - - for c in self.__chandles: - if c not in self.__freehandles: - try: - self.__mhandle.remove_handle(c) - except pycurl.error: - # If cleanup is interrupted, it's - # possible that a handle was removed but - # not placed in freelist. In that case, - # finish cleanup and appened to - # freehandles. - pass - self.__teardown_handle(c) - - self.__active_handles = 0 - self.__freehandles = self.__chandles[:] - self.__req_q = deque() - self.__failures = [] - self.__success = [] - self.__orphans = set() - - def send_data(self, url, data=None, header=None, sslcert=None, - sslkey=None, repourl=None, ccancel=None, - data_fobj=None, data_fp=None, failonerror=True, - progclass=None, progtrack=None, proxy=None, runtime_proxy=None): - """Invoke the engine to retrieve a single URL. - This routine sends the data in data, and returns the - server's response. - - Callers wishing to obtain multiple URLs at once should use - addUrl() and run(). 
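# A minimal usage sketch of the request cycle documented above: queue URLs
# with add_url(), poll run() until nothing is pending, then claim the
# transient failures (and optionally the successfully transferred URLs) with
# check_status().  "engine" is assumed to be an already-constructed
# CurlTransportEngine and the destination paths are illustrative; no request
# is actually issued here.
def drain(engine, urls, destdir="/var/tmp"):
    for u in urls:
        engine.add_url(u, filepath=destdir + "/" + u.split("/")[-1])
    while engine.pending:
        engine.run()         # permanent failures raise as soon as they occur
    # Transient failures are returned, not raised; the caller decides
    # whether to retry them.
    retryable, transferred = engine.check_status(good_reqs=True)
    return retryable, transferred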
- - sendData will return a read-only file object that allows access - to the server's response..""" - - fobj = fileobj.StreamingFileObj(url, self, ccancel=ccancel) - progfunc = None - - if ccancel and not progtrack and not progclass: - progfunc = fobj.get_progress_func() - - t = TransportRequest(url, writefunc=fobj.get_write_func(), - hdrfunc=fobj.get_header_func(), header=header, data=data, - httpmethod="POST", sslcert=sslcert, sslkey=sslkey, - repourl=repourl, progfunc=progfunc, uuid=fobj.uuid, - read_fobj=data_fobj, read_filepath=data_fp, - failonerror=failonerror, progclass=progclass, - progtrack=progtrack, proxy=proxy, - runtime_proxy=runtime_proxy) - - self.__req_q.appendleft(t) - - return fobj - - def set_file_bufsz(self, size): - """If the downloaded files are being written out by - the file() mechanism, and not written using a callback, - the I/O is buffered. Set the buffer size using - this function. If it's not set, a default of 131072 (128k) - is used.""" - - if size <= 0: - self.__file_bufsz = 8192 - return - - self.__file_bufsz = size - - def set_header(self, hdrdict=None): - """Supply a dictionary of name/value pairs in hdrdict. - These will be included on all requests issued by the transport - engine. To append a specific header to a certain request, - supply a dictionary to the header argument of addUrl.""" - - if not hdrdict: - self.__common_header = {} - return - - self.__common_header = hdrdict - - def set_user_agent(self, ua_str): - """Supply a string str and the transport engine will - use this string as its User-Agent header. This is - a header that will be common to all transport requests.""" - - self.__user_agent = ua_str - - def __setup_handle(self, hdl, treq): - """Setup the curl easy handle, hdl, with the parameters - specified in the TransportRequest treq. If global - parameters are set, apply these to the handle as well.""" - - # Set nosignal, so timeouts don't crash client - hdl.setopt(pycurl.NOSIGNAL, 1) - - if DebugValues.get("curlverbose", False): - hdl.setopt(pycurl.VERBOSE, 1) - - # Set connect timeout. Its value is defined in global_settings. - hdl.setopt(pycurl.CONNECTTIMEOUT, - global_settings.PKG_CLIENT_CONNECT_TIMEOUT) - - # Set lowspeed limit and timeout. Clients that are too - # slow or have hung after specified amount of time will - # abort the connection. - hdl.setopt(pycurl.LOW_SPEED_LIMIT, - global_settings.pkg_client_lowspeed_limit) - hdl.setopt(pycurl.LOW_SPEED_TIME, - global_settings.PKG_CLIENT_LOWSPEED_TIMEOUT) - - # Follow redirects - hdl.setopt(pycurl.FOLLOWLOCATION, True) - # Set limit on maximum number of redirects - hdl.setopt(pycurl.MAXREDIRS, - global_settings.PKG_CLIENT_MAX_REDIRECT) - - # Use HTTP/1.1 - hdl.setopt(pycurl.HTTP_VERSION, pycurl.CURL_HTTP_VERSION_1_1) - - # Store the proxy in the handle so it can be used to retrieve - # transport statistics later. - hdl.proxy = None - hdl.runtime_proxy = None - - if treq.system: - # For requests that are proxied through the system - # repository, we do not want to use $http_proxy - # variables. For direct access to the - # system-repository, we set an empty proxy, which has - # the same effect. - if treq.proxy: - hdl.proxy = treq.proxy - hdl.setopt(pycurl.PROXY, treq.proxy) - else: - hdl.setopt(pycurl.PROXY, "") - elif treq.runtime_proxy: - # Allow $http_proxy environment variables - if treq.runtime_proxy != "-": - # a runtime_proxy of '-' means we've found a - # no-proxy environment variable. 
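# A small sketch of the per-handle option setup performed by
# __setup_handle() above, using the real pycurl option names; the numeric
# values are illustrative, whereas the engine takes them from
# global_settings.
import pycurl

def configure_handle(hdl, connect_timeout=60, lowspeed_limit=1024,
                     lowspeed_timeout=30, max_redirects=5):
    hdl.setopt(pycurl.NOSIGNAL, 1)                      # timeouts must not kill the client
    hdl.setopt(pycurl.CONNECTTIMEOUT, connect_timeout)
    hdl.setopt(pycurl.LOW_SPEED_LIMIT, lowspeed_limit)  # abort if slower than this...
    hdl.setopt(pycurl.LOW_SPEED_TIME, lowspeed_timeout) # ...for this many seconds
    hdl.setopt(pycurl.FOLLOWLOCATION, True)             # follow redirects
    hdl.setopt(pycurl.MAXREDIRS, max_redirects)
    hdl.setopt(pycurl.HTTP_VERSION, pycurl.CURL_HTTP_VERSION_1_1)
    return hdl

# configure_handle(pycurl.Curl())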
- hdl.setopt(pycurl.PROXY, treq.runtime_proxy) - hdl.proxy = treq.proxy - hdl.runtime_proxy = treq.runtime_proxy - else: - # Make sure that we don't use a proxy if the destination - # is localhost. - hdl.setopt(pycurl.NOPROXY, "localhost") - - # Set user agent, if client has defined it - if self.__user_agent: - hdl.setopt(pycurl.USERAGENT, self.__user_agent) - - # Take header dictionaries and convert them into lists - # of header strings. - if self.__common_header or treq.header: - headerlist = [] - - # Headers common to all requests - for k, v in six.iteritems(self.__common_header): - headerstr = "{0}: {1}".format(k, v) - headerlist.append(headerstr) - - # Headers specific to this request - if treq.header: - for k, v in six.iteritems(treq.header): - headerstr = "{0}: {1}".format(k, v) - headerlist.append(headerstr) - - hdl.setopt(pycurl.HTTPHEADER, headerlist) - - # Set request url. Also set attribute on handle. - hdl.setopt(pycurl.URL, treq.url.encode('ascii', 'ignore')) - hdl.url = treq.url - hdl.uuid = treq.uuid - hdl.starttime = time.time() - # The repourl is the url stem that identifies the - # repository. This is useful to have around for coalescing - # error output, and statistics reporting. - hdl.repourl = treq.repourl - if treq.filepath: - try: - hdl.fobj = open(treq.filepath, "wb+", - self.__file_bufsz) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) - # Raise OperationError if it's not EACCES - # or EROFS. - raise tx.TransportOperationError( - "Unable to open file: {0}".format(e)) - - hdl.setopt(pycurl.WRITEDATA, hdl.fobj) - # Request filetime, if endpoint knows it. - hdl.setopt(pycurl.OPT_FILETIME, True) - hdl.filepath = treq.filepath - elif treq.writefunc: - hdl.setopt(pycurl.WRITEFUNCTION, treq.writefunc) - hdl.filepath = None - hdl.fobj = None - else: - raise tx.TransportOperationError("Transport invocation" - " for URL {0} did not specify filepath or write" - " function.".format(treq.url)) - - if treq.failonerror: - hdl.setopt(pycurl.FAILONERROR, True) - - if treq.progtrack and treq.progclass: - hdl.setopt(pycurl.NOPROGRESS, 0) - hdl.fileprog = treq.progclass(treq.progtrack) - hdl.setopt(pycurl.PROGRESSFUNCTION, - hdl.fileprog.progress_callback) - elif treq.progfunc: - # For light-weight progress tracking / cancelation. - hdl.setopt(pycurl.NOPROGRESS, 0) - hdl.setopt(pycurl.PROGRESSFUNCTION, treq.progfunc) - - proto = urlsplit(treq.url)[0] - if not proto in ("http", "https"): - return - - if treq.read_filepath: - try: - hdl.r_fobj = open(treq.read_filepath, "rb", - self.__file_bufsz) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - # Raise OperationError if it's not EACCES - # or EROFS. 
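# A small sketch of the header handling described above: the engine flattens
# its common header dictionary plus any per-request headers into the
# "Name: value" string list that pycurl.HTTPHEADER expects (both sets are
# simply appended, with no merging).  The helper name and sample values are
# illustrative only.
def build_header_list(common, per_request=None):
    headerlist = ["{0}: {1}".format(k, v) for k, v in common.items()]
    if per_request:
        headerlist.extend(
            "{0}: {1}".format(k, v) for k, v in per_request.items()
        )
    return headerlist

assert "X-Ipkg-Uuid: demo" in build_header_list(
    {"User-Agent": "pkg"}, {"X-Ipkg-Uuid": "demo"}
)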
- raise tx.TransportOperationError( - "Unable to open file: {0}".format(e)) - - if treq.compressible: - hdl.setopt(pycurl.ENCODING, "") - - if treq.hdrfunc: - hdl.setopt(pycurl.HEADERFUNCTION, treq.hdrfunc) - - if treq.httpmethod == "GET": - hdl.setopt(pycurl.HTTPGET, True) - elif treq.httpmethod == "HEAD": - hdl.setopt(pycurl.NOBODY, True) - elif treq.httpmethod == "POST": - hdl.setopt(pycurl.POST, True) - if treq.data is not None: - hdl.setopt(pycurl.POSTFIELDS, treq.data) - elif hdl.r_fobj or treq.read_fobj: - if not hdl.r_fobj: - hdl.r_fobj = treq.read_fobj - hdl.setopt(pycurl.READDATA, hdl.r_fobj) - hdl.setopt(pycurl.POSTFIELDSIZE, - os.fstat(hdl.r_fobj.fileno()).st_size) - else: - raise tx.TransportOperationError("Transport " - "operation for POST URL {0} did not " - "supply data or read_fobj. At least one " - "is required.".format(treq.url)) - elif treq.httpmethod == "PUT": - hdl.setopt(pycurl.UPLOAD, True) - if hdl.r_fobj or treq.read_fobj: - if not hdl.r_fobj: - hdl.r_fobj = treq.read_fobj - hdl.setopt(pycurl.READDATA, hdl.r_fobj) - hdl.setopt(pycurl.INFILESIZE, - os.fstat(hdl.r_fobj.fileno()).st_size) - else: - raise tx.TransportOperationError("Transport " - "operation for PUT URL {0} did not " - "supply a read_fobj. One is " - "required.".format(treq.url)) - elif treq.httpmethod == "DELETE": - hdl.setopt(pycurl.CUSTOMREQUEST, "DELETE") - else: - raise tx.TransportOperationError("Invalid http method " - "'{0}' specified.".format(treq.httpmethod)) - - # Set up SSL options - if treq.sslcert: - hdl.setopt(pycurl.SSLCERT, treq.sslcert) - if treq.sslkey: - hdl.setopt(pycurl.SSLKEY, treq.sslkey) - - # Options that apply when SSL is enabled - if proto == "https": - # Verify that peer's CN matches CN on certificate - hdl.setopt(pycurl.SSL_VERIFYHOST, 2) - hdl.setopt(pycurl.SSL_VERIFYPEER, 1) - cadir = self.__xport.get_ca_dir() - hdl.setopt(pycurl.CAPATH, cadir) - if "ssl_ca_file" in DebugValues: - cafile = DebugValues["ssl_ca_file"] - hdl.setopt(pycurl.CAINFO, cafile) - hdl.unsetopt(pycurl.CAPATH) - else: - hdl.unsetopt(pycurl.CAINFO) - - def shutdown(self): - """Shutdown the transport engine, perform cleanup.""" - - for c in self.__chandles: - c.close() - - self.__chandles = None - self.__freehandles = None - self.__mhandle.close() - self.__mhandle = None - self.__req_q = None - self.__failures = None - self.__success = None - self.__orphans = None - self.__active_handles = 0 - - @staticmethod - def __teardown_handle(hdl): - """Cleanup any state that we've associated with this handle. - After a handle has been torn down, it should still be valid - for use, but should have no previous state. To remove - handles from use completely, use __shutdown.""" - - hdl.reset() - if hdl.fobj: - hdl.fobj.close() - hdl.fobj = None - if not hdl.success: - if hdl.fileprog: - hdl.fileprog.abort() - try: - os.remove(hdl.filepath) - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise \ - tx.TransportOperationError( - "Unable to remove file: " - "{0}".format(e)) - else: - if hdl.fileprog: - filesz = os.stat(hdl.filepath).st_size - hdl.fileprog.commit(filesz) - if hdl.filepath and hdl.filetime > -1: - # Set atime/mtime, if we were able to - # figure it out. File action will - # override this at install time, if the - # action has a timestamp property. 
- ft = hdl.filetime - os.utime(hdl.filepath, (ft, ft)) - - if hdl.r_fobj: - hdl.r_fobj.close() - hdl.r_fobj = None - - hdl.url = None - hdl.repourl = None - hdl.success = False - hdl.filepath = None - hdl.fileprog = None - hdl.uuid = None - hdl.filetime = -1 - hdl.starttime = -1 + return rf + + # otherwise, look for failures that match just the URLs + # in urllist. + rf = [ + tf + for tf in self.__failures + if hasattr(tf, "url") and tf.url in urllist + ] + + # remove failues in separate pass, or else for loop gets + # confused. + for f in rf: + self.__failures.remove(f) + + if not good_reqs: + self.__success = [] + return rf + + rs = [] + + for ts in self.__success: + if ts in urllist: + rs.append(ts) + + for s in rs: + self.__success.remove(s) + + return rf, rs + + def get_url( + self, + url, + header=None, + sslcert=None, + sslkey=None, + repourl=None, + compressible=False, + ccancel=None, + failonerror=True, + proxy=None, + runtime_proxy=None, + system=False, + ): + """Invoke the engine to retrieve a single URL. Callers + wishing to obtain multiple URLs at once should use + addUrl() and run(). + + getUrl will return a read-only file object that allows access + to the URL's data. + + 'proxy' is the persistent proxy value for this url and is + stored as part of the transport stats accounting. + + 'runtime_proxy' is the actual proxy value that is used by pycurl + to retrieve this resource. + + 'system' whether the resource is being retrieved on behalf of + a system-publisher or directly from the system-repository. + """ + + fobj = fileobj.StreamingFileObj(url, self, ccancel=ccancel) + progfunc = None + + if ccancel: + progfunc = fobj.get_progress_func() + + t = TransportRequest( + url, + writefunc=fobj.get_write_func(), + hdrfunc=fobj.get_header_func(), + header=header, + sslcert=sslcert, + sslkey=sslkey, + repourl=repourl, + compressible=compressible, + progfunc=progfunc, + uuid=fobj.uuid, + failonerror=failonerror, + proxy=proxy, + runtime_proxy=runtime_proxy, + system=system, + ) + + self.__req_q.appendleft(t) + + return fobj + + def get_url_header( + self, + url, + header=None, + sslcert=None, + sslkey=None, + repourl=None, + ccancel=None, + failonerror=True, + proxy=None, + runtime_proxy=None, + ): + """Invoke the engine to retrieve a single URL's headers. + + getUrlHeader will return a read-only file object that + contains no data. + + 'proxy' is the persistent proxy value for this url and is + stored as part of the transport stats accounting. + + 'runtime_proxy' is the actual proxy value that is used by pycurl + to retrieve this resource. + """ + + fobj = fileobj.StreamingFileObj(url, self, ccancel=ccancel) + progfunc = None + + if ccancel: + progfunc = fobj.get_progress_func() + + t = TransportRequest( + url, + writefunc=fobj.get_write_func(), + hdrfunc=fobj.get_header_func(), + header=header, + httpmethod="HEAD", + sslcert=sslcert, + sslkey=sslkey, + repourl=repourl, + progfunc=progfunc, + uuid=fobj.uuid, + failonerror=failonerror, + proxy=proxy, + runtime_proxy=runtime_proxy, + ) + + self.__req_q.appendleft(t) + + return fobj + + @property + def pending(self): + """Returns true if the engine still has outstanding + work to perform, false otherwise.""" + + return bool(self.__req_q) or self.__active_handles > 0 + + def run(self): + """Run the transport engine. This polls the underlying + framework to complete any asynchronous I/O. 
Synchronous + operations should have completed when startRequest + was invoked.""" + + if not self.pending: + return + + if self.__active_handles > 0: + # timeout returned in milliseconds + timeout = self.__mhandle.timeout() + if timeout == -1: + # Pick our own timeout. + timeout = 1.0 + elif timeout > 0: + # Timeout of 0 means skip call + # to select. + # + # Convert from milliseconds to seconds. + timeout = timeout / 1000.0 + + if timeout: + self.__mhandle.select(timeout) + + # If object deletion has given the transport engine orphaned + # requests to purge, do this first, in case the cleanup yields + # free handles. + while self.__orphans: + url, uuid = self.__orphans.pop() + self.remove_request(url, uuid) + + while self.__freehandles and self.__req_q: + t = self.__req_q.pop() + eh = self.__freehandles.pop(-1) + self.__setup_handle(eh, t) + self.__mhandle.add_handle(eh) + + self.__call_perform() + + self.__cleanup_requests() + + if self.__active_handles and ( + not self.__freehandles or not self.__req_q + ): + cur_clock = time.time() + if cur_clock - self.__last_stall_check > 1: + self.__last_stall_check = cur_clock + self.__check_for_stalls() + elif cur_clock - self.__last_stall_check < 0: + self.__last_stall_check = cur_clock + self.__check_for_stalls() + + def orphaned_request(self, url, uuid): + """Add the URL to the list of orphaned requests. Any URL in + list will be removed from the transport next time run() is + invoked. This is used by the fileobj's __del__ method + to prevent unintended modifications to transport state + when StreamingFileObjs that aren't close()'d get cleaned + up.""" + + self.__orphans.add((url, uuid)) + + def remove_request(self, url, uuid): + """In order to remove a request, it may be necessary + to walk all of the items in the request queue, all of the + currently active handles, and the list of any transient + failures. This is expensive, so only remove a request + if absolutely necessary.""" + + for h in self.__chandles: + if h.url == url and h.uuid == uuid and h not in self.__freehandles: + try: + self.__mhandle.remove_handle(h) + except pycurl.error: + # If cleanup is interrupted, it's + # possible that a handle was removed but + # not placed in freelist. In that case, + # finish cleanup and appened to + # freehandles. + pass + self.__teardown_handle(h) + self.__freehandles.append(h) + return + + for i, t in enumerate(self.__req_q): + if t.url == url and t.uuid == uuid: + del self.__req_q[i] + return + + for ex in self.__failures: + if ex.url == url and ex.uuid == uuid: + self.__failures.remove(ex) + return + + def reset(self): + """Reset the state of the transport engine. Do this + before performing another type of request.""" + + for c in self.__chandles: + if c not in self.__freehandles: + try: + self.__mhandle.remove_handle(c) + except pycurl.error: + # If cleanup is interrupted, it's + # possible that a handle was removed but + # not placed in freelist. In that case, + # finish cleanup and appened to + # freehandles. + pass + self.__teardown_handle(c) + + self.__active_handles = 0 + self.__freehandles = self.__chandles[:] + self.__req_q = deque() + self.__failures = [] + self.__success = [] + self.__orphans = set() + + def send_data( + self, + url, + data=None, + header=None, + sslcert=None, + sslkey=None, + repourl=None, + ccancel=None, + data_fobj=None, + data_fp=None, + failonerror=True, + progclass=None, + progtrack=None, + proxy=None, + runtime_proxy=None, + ): + """Invoke the engine to retrieve a single URL. 
+ This routine sends the data in data, and returns the + server's response. + + Callers wishing to obtain multiple URLs at once should use + addUrl() and run(). + + sendData will return a read-only file object that allows access + to the server's response..""" + + fobj = fileobj.StreamingFileObj(url, self, ccancel=ccancel) + progfunc = None + + if ccancel and not progtrack and not progclass: + progfunc = fobj.get_progress_func() + + t = TransportRequest( + url, + writefunc=fobj.get_write_func(), + hdrfunc=fobj.get_header_func(), + header=header, + data=data, + httpmethod="POST", + sslcert=sslcert, + sslkey=sslkey, + repourl=repourl, + progfunc=progfunc, + uuid=fobj.uuid, + read_fobj=data_fobj, + read_filepath=data_fp, + failonerror=failonerror, + progclass=progclass, + progtrack=progtrack, + proxy=proxy, + runtime_proxy=runtime_proxy, + ) + + self.__req_q.appendleft(t) + + return fobj + + def set_file_bufsz(self, size): + """If the downloaded files are being written out by + the file() mechanism, and not written using a callback, + the I/O is buffered. Set the buffer size using + this function. If it's not set, a default of 131072 (128k) + is used.""" + + if size <= 0: + self.__file_bufsz = 8192 + return + + self.__file_bufsz = size + + def set_header(self, hdrdict=None): + """Supply a dictionary of name/value pairs in hdrdict. + These will be included on all requests issued by the transport + engine. To append a specific header to a certain request, + supply a dictionary to the header argument of addUrl.""" + + if not hdrdict: + self.__common_header = {} + return + + self.__common_header = hdrdict + + def set_user_agent(self, ua_str): + """Supply a string str and the transport engine will + use this string as its User-Agent header. This is + a header that will be common to all transport requests.""" + + self.__user_agent = ua_str + + def __setup_handle(self, hdl, treq): + """Setup the curl easy handle, hdl, with the parameters + specified in the TransportRequest treq. If global + parameters are set, apply these to the handle as well.""" + + # Set nosignal, so timeouts don't crash client + hdl.setopt(pycurl.NOSIGNAL, 1) + + if DebugValues.get("curlverbose", False): + hdl.setopt(pycurl.VERBOSE, 1) + + # Set connect timeout. Its value is defined in global_settings. + hdl.setopt( + pycurl.CONNECTTIMEOUT, global_settings.PKG_CLIENT_CONNECT_TIMEOUT + ) + + # Set lowspeed limit and timeout. Clients that are too + # slow or have hung after specified amount of time will + # abort the connection. + hdl.setopt( + pycurl.LOW_SPEED_LIMIT, global_settings.pkg_client_lowspeed_limit + ) + hdl.setopt( + pycurl.LOW_SPEED_TIME, global_settings.PKG_CLIENT_LOWSPEED_TIMEOUT + ) + + # Follow redirects + hdl.setopt(pycurl.FOLLOWLOCATION, True) + # Set limit on maximum number of redirects + hdl.setopt(pycurl.MAXREDIRS, global_settings.PKG_CLIENT_MAX_REDIRECT) + + # Use HTTP/1.1 + hdl.setopt(pycurl.HTTP_VERSION, pycurl.CURL_HTTP_VERSION_1_1) + + # Store the proxy in the handle so it can be used to retrieve + # transport statistics later. + hdl.proxy = None + hdl.runtime_proxy = None + + if treq.system: + # For requests that are proxied through the system + # repository, we do not want to use $http_proxy + # variables. For direct access to the + # system-repository, we set an empty proxy, which has + # the same effect. 
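Taken together, the request methods above (get_url(), get_url_header(), send_data()) queue a TransportRequest and hand back a StreamingFileObj whose data only arrives as the engine is polled; reading from the file object drives run() for you. A minimal usage sketch follows; `eng` stands for an already-configured transport engine instance (its construction is outside this hunk) and the URL is a placeholder:

    import pkg.client.transport.exception as tx

    eng = ...  # an already-configured transport engine (not constructed here)
    eng.set_user_agent("pkg-example/1.0")
    eng.set_header({"X-Example": "demo"})      # added to every request

    resp = eng.get_url("http://pkg.example.com/some/resource")
    try:
        body = resp.read()          # pulls data; internally calls eng.run()
        msg = resp.get_http_message()
    except tx.TransportException:
        # accumulated failures can also be inspected via eng.check_status()
        raise
    finally:
        resp.close()                # detaches the request from the engine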
+ if treq.proxy: + hdl.proxy = treq.proxy + hdl.setopt(pycurl.PROXY, treq.proxy) + else: + hdl.setopt(pycurl.PROXY, "") + elif treq.runtime_proxy: + # Allow $http_proxy environment variables + if treq.runtime_proxy != "-": + # a runtime_proxy of '-' means we've found a + # no-proxy environment variable. + hdl.setopt(pycurl.PROXY, treq.runtime_proxy) + hdl.proxy = treq.proxy + hdl.runtime_proxy = treq.runtime_proxy + else: + # Make sure that we don't use a proxy if the destination + # is localhost. + hdl.setopt(pycurl.NOPROXY, "localhost") + + # Set user agent, if client has defined it + if self.__user_agent: + hdl.setopt(pycurl.USERAGENT, self.__user_agent) + + # Take header dictionaries and convert them into lists + # of header strings. + if self.__common_header or treq.header: + headerlist = [] + + # Headers common to all requests + for k, v in six.iteritems(self.__common_header): + headerstr = "{0}: {1}".format(k, v) + headerlist.append(headerstr) + + # Headers specific to this request + if treq.header: + for k, v in six.iteritems(treq.header): + headerstr = "{0}: {1}".format(k, v) + headerlist.append(headerstr) + + hdl.setopt(pycurl.HTTPHEADER, headerlist) + + # Set request url. Also set attribute on handle. + hdl.setopt(pycurl.URL, treq.url.encode("ascii", "ignore")) + hdl.url = treq.url + hdl.uuid = treq.uuid + hdl.starttime = time.time() + # The repourl is the url stem that identifies the + # repository. This is useful to have around for coalescing + # error output, and statistics reporting. + hdl.repourl = treq.repourl + if treq.filepath: + try: + hdl.fobj = open(treq.filepath, "wb+", self.__file_bufsz) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + # Raise OperationError if it's not EACCES + # or EROFS. + raise tx.TransportOperationError( + "Unable to open file: {0}".format(e) + ) + + hdl.setopt(pycurl.WRITEDATA, hdl.fobj) + # Request filetime, if endpoint knows it. + hdl.setopt(pycurl.OPT_FILETIME, True) + hdl.filepath = treq.filepath + elif treq.writefunc: + hdl.setopt(pycurl.WRITEFUNCTION, treq.writefunc) + hdl.filepath = None + hdl.fobj = None + else: + raise tx.TransportOperationError( + "Transport invocation" + " for URL {0} did not specify filepath or write" + " function.".format(treq.url) + ) + + if treq.failonerror: + hdl.setopt(pycurl.FAILONERROR, True) + + if treq.progtrack and treq.progclass: + hdl.setopt(pycurl.NOPROGRESS, 0) + hdl.fileprog = treq.progclass(treq.progtrack) + hdl.setopt(pycurl.PROGRESSFUNCTION, hdl.fileprog.progress_callback) + elif treq.progfunc: + # For light-weight progress tracking / cancelation. + hdl.setopt(pycurl.NOPROGRESS, 0) + hdl.setopt(pycurl.PROGRESSFUNCTION, treq.progfunc) + + proto = urlsplit(treq.url)[0] + if not proto in ("http", "https"): + return + + if treq.read_filepath: + try: + hdl.r_fobj = open(treq.read_filepath, "rb", self.__file_bufsz) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + # Raise OperationError if it's not EACCES + # or EROFS. 
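The read_filepath branch here is the counterpart of send_data()'s data_fp argument: the engine opens the file itself, streams it via READDATA and sizes the request body from fstat(), so a large payload never has to be held in memory. A hedged sketch of that calling pattern (eng, the URL and the path are placeholders):

    eng = ...  # an already-configured transport engine (not constructed here)
    resp = eng.send_data(
        "http://pkg.example.com/some/endpoint",
        data_fp="/tmp/payload.bin",   # opened by the engine, streamed as POST body
        header={"Content-Type": "application/octet-stream"},
    )
    reply = resp.read()               # server's response, per the docstring above
    resp.close()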
+ raise tx.TransportOperationError( + "Unable to open file: {0}".format(e) + ) + + if treq.compressible: + hdl.setopt(pycurl.ENCODING, "") + + if treq.hdrfunc: + hdl.setopt(pycurl.HEADERFUNCTION, treq.hdrfunc) + + if treq.httpmethod == "GET": + hdl.setopt(pycurl.HTTPGET, True) + elif treq.httpmethod == "HEAD": + hdl.setopt(pycurl.NOBODY, True) + elif treq.httpmethod == "POST": + hdl.setopt(pycurl.POST, True) + if treq.data is not None: + hdl.setopt(pycurl.POSTFIELDS, treq.data) + elif hdl.r_fobj or treq.read_fobj: + if not hdl.r_fobj: + hdl.r_fobj = treq.read_fobj + hdl.setopt(pycurl.READDATA, hdl.r_fobj) + hdl.setopt( + pycurl.POSTFIELDSIZE, os.fstat(hdl.r_fobj.fileno()).st_size + ) + else: + raise tx.TransportOperationError( + "Transport " + "operation for POST URL {0} did not " + "supply data or read_fobj. At least one " + "is required.".format(treq.url) + ) + elif treq.httpmethod == "PUT": + hdl.setopt(pycurl.UPLOAD, True) + if hdl.r_fobj or treq.read_fobj: + if not hdl.r_fobj: + hdl.r_fobj = treq.read_fobj + hdl.setopt(pycurl.READDATA, hdl.r_fobj) + hdl.setopt( + pycurl.INFILESIZE, os.fstat(hdl.r_fobj.fileno()).st_size + ) + else: + raise tx.TransportOperationError( + "Transport " + "operation for PUT URL {0} did not " + "supply a read_fobj. One is " + "required.".format(treq.url) + ) + elif treq.httpmethod == "DELETE": + hdl.setopt(pycurl.CUSTOMREQUEST, "DELETE") + else: + raise tx.TransportOperationError( + "Invalid http method " + "'{0}' specified.".format(treq.httpmethod) + ) + + # Set up SSL options + if treq.sslcert: + hdl.setopt(pycurl.SSLCERT, treq.sslcert) + if treq.sslkey: + hdl.setopt(pycurl.SSLKEY, treq.sslkey) + + # Options that apply when SSL is enabled + if proto == "https": + # Verify that peer's CN matches CN on certificate + hdl.setopt(pycurl.SSL_VERIFYHOST, 2) + hdl.setopt(pycurl.SSL_VERIFYPEER, 1) + cadir = self.__xport.get_ca_dir() + hdl.setopt(pycurl.CAPATH, cadir) + if "ssl_ca_file" in DebugValues: + cafile = DebugValues["ssl_ca_file"] + hdl.setopt(pycurl.CAINFO, cafile) + hdl.unsetopt(pycurl.CAPATH) + else: + hdl.unsetopt(pycurl.CAINFO) + + def shutdown(self): + """Shutdown the transport engine, perform cleanup.""" + + for c in self.__chandles: + c.close() + + self.__chandles = None + self.__freehandles = None + self.__mhandle.close() + self.__mhandle = None + self.__req_q = None + self.__failures = None + self.__success = None + self.__orphans = None + self.__active_handles = 0 + + @staticmethod + def __teardown_handle(hdl): + """Cleanup any state that we've associated with this handle. + After a handle has been torn down, it should still be valid + for use, but should have no previous state. To remove + handles from use completely, use __shutdown.""" + + hdl.reset() + if hdl.fobj: + hdl.fobj.close() + hdl.fobj = None + if not hdl.success: + if hdl.fileprog: + hdl.fileprog.abort() + try: + os.remove(hdl.filepath) + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise tx.TransportOperationError( + "Unable to remove file: " "{0}".format(e) + ) + else: + if hdl.fileprog: + filesz = os.stat(hdl.filepath).st_size + hdl.fileprog.commit(filesz) + if hdl.filepath and hdl.filetime > -1: + # Set atime/mtime, if we were able to + # figure it out. File action will + # override this at install time, if the + # action has a timestamp property. 
+ ft = hdl.filetime + os.utime(hdl.filepath, (ft, ft)) + + if hdl.r_fobj: + hdl.r_fobj.close() + hdl.r_fobj = None + + hdl.url = None + hdl.repourl = None + hdl.success = False + hdl.filepath = None + hdl.fileprog = None + hdl.uuid = None + hdl.filetime = -1 + hdl.starttime = -1 class TransportRequest(object): - """A class that contains per-request information for the underlying - transport engines. This is used to set per-request options that - are used either by the framework, the transport, or both.""" - - def __init__(self, url, filepath=None, writefunc=None, - hdrfunc=None, header=None, data=None, httpmethod="GET", - progclass=None, progtrack=None, sslcert=None, sslkey=None, - repourl=None, compressible=False, progfunc=None, uuid=None, - read_fobj=None, read_filepath=None, failonerror=False, proxy=None, - runtime_proxy=None, system=False): - """Create a TransportRequest with the following parameters: - - url - The url that the transport engine should retrieve - - filepath - If defined, the transport engine will download the - file to this path. If not defined, the caller should - supply a write function. - - writefunc - A function, supplied instead of filepath, that - reads the bytes supplied by the transport engine and writes - them somewhere for processing. This is a callback. - - hdrfunc - A callback for examining the contents of header - data in a response to a transport request. - - header - A dictionary of key/value pairs to be included - in the request's header. - - compressible - A boolean value that indicates whether - the content that is requested is a candidate for transport - level compression. - - data - If the request is sending a data payload, include - the data in this argument. - - failonerror - If the request returns a HTTP code >= 400, - terminate the request early, instead of running it to - completion. - - httpmethod - If the request is a HTTP/HTTPS request, - this can override the default HTTP method of GET. - - progtrack - If the transport wants the engine to update - the progress of the download, supply a ProgressTracker - object in this argument. - - progclass - If the transport was supplied with a ProgressTracker - this must point to a class that knows how to wrap the progress - tracking object in way that allows the transport to invoke - the proper callbacks. The transport instantiates an object - of this class before beginning the request. - - progfunc - A function to be used as a progress callback. - The preferred method is is use progtrack/progclass, but - light-weight implementations may use progfunc instead, - especially if they don't need per-file updates. - - read_filepath - If the request is sending a file, include - the path here, as this is the most efficient way to send - the data. - - read_fobj - If the request is sending a large payload, - this points to a fileobject from which the data may be - read. - - repouri - This is the URL stem that identifies the repo. - It's a subset of url. It's also used by the stats system. - - sslcert - If the request is using SSL, HTTPS for example, - provide a path to the SSL certificate here. - - sslkey - If the request is using SSL, like HTTPS for example, - provide a path to the SSL key here. - - uuid - In order to remove the request from the list of - many possible requests, supply a unique identifier in uuid. - - proxy - If the request should be performed using a proxy, - that proxy should be specified here. 
- - runtime_proxy - In order to avoid repeated environment lookups - we pass the proxy that should be used at runtime, which may - differ from the 'proxy' value. - - system - whether this request is on behalf of a system - publisher. Usually this isn't necessary, as the - TransportRepoURI will have been configured with correct proxy - and runtime_proxy properties. However, for direct access to - resources served by the system-repository, we use this to - prevent $http_proxy environment variables from being used. - - A TransportRequest must contain enough information to uniquely - identify any pkg.client.publisher.TransportRepoURI - in - particular, it must contain all fields used by - TransportRepoURI.key() which is currently the (url, proxy) - tuple, and is used as the key when recording/retrieving - transport statistics.""" - - self.url = url - self.filepath = filepath - self.writefunc = writefunc - self.hdrfunc = hdrfunc - self.header = header - self.data = data - self.httpmethod = httpmethod - self.progclass = progclass - self.progtrack = progtrack - self.progfunc = progfunc - self.repourl = repourl - self.sslcert = sslcert - self.sslkey = sslkey - self.compressible = compressible - self.uuid = uuid - self.read_fobj = read_fobj - self.read_filepath = read_filepath - self.failonerror = failonerror - self.proxy = proxy - self.runtime_proxy = runtime_proxy - self.system = system + """A class that contains per-request information for the underlying + transport engines. This is used to set per-request options that + are used either by the framework, the transport, or both.""" + + def __init__( + self, + url, + filepath=None, + writefunc=None, + hdrfunc=None, + header=None, + data=None, + httpmethod="GET", + progclass=None, + progtrack=None, + sslcert=None, + sslkey=None, + repourl=None, + compressible=False, + progfunc=None, + uuid=None, + read_fobj=None, + read_filepath=None, + failonerror=False, + proxy=None, + runtime_proxy=None, + system=False, + ): + """Create a TransportRequest with the following parameters: + + url - The url that the transport engine should retrieve + + filepath - If defined, the transport engine will download the + file to this path. If not defined, the caller should + supply a write function. + + writefunc - A function, supplied instead of filepath, that + reads the bytes supplied by the transport engine and writes + them somewhere for processing. This is a callback. + + hdrfunc - A callback for examining the contents of header + data in a response to a transport request. + + header - A dictionary of key/value pairs to be included + in the request's header. + + compressible - A boolean value that indicates whether + the content that is requested is a candidate for transport + level compression. + + data - If the request is sending a data payload, include + the data in this argument. + + failonerror - If the request returns a HTTP code >= 400, + terminate the request early, instead of running it to + completion. + + httpmethod - If the request is a HTTP/HTTPS request, + this can override the default HTTP method of GET. + + progtrack - If the transport wants the engine to update + the progress of the download, supply a ProgressTracker + object in this argument. + + progclass - If the transport was supplied with a ProgressTracker + this must point to a class that knows how to wrap the progress + tracking object in way that allows the transport to invoke + the proper callbacks. The transport instantiates an object + of this class before beginning the request. 
+ + progfunc - A function to be used as a progress callback. + The preferred method is is use progtrack/progclass, but + light-weight implementations may use progfunc instead, + especially if they don't need per-file updates. + + read_filepath - If the request is sending a file, include + the path here, as this is the most efficient way to send + the data. + + read_fobj - If the request is sending a large payload, + this points to a fileobject from which the data may be + read. + + repouri - This is the URL stem that identifies the repo. + It's a subset of url. It's also used by the stats system. + + sslcert - If the request is using SSL, HTTPS for example, + provide a path to the SSL certificate here. + + sslkey - If the request is using SSL, like HTTPS for example, + provide a path to the SSL key here. + + uuid - In order to remove the request from the list of + many possible requests, supply a unique identifier in uuid. + + proxy - If the request should be performed using a proxy, + that proxy should be specified here. + + runtime_proxy - In order to avoid repeated environment lookups + we pass the proxy that should be used at runtime, which may + differ from the 'proxy' value. + + system - whether this request is on behalf of a system + publisher. Usually this isn't necessary, as the + TransportRepoURI will have been configured with correct proxy + and runtime_proxy properties. However, for direct access to + resources served by the system-repository, we use this to + prevent $http_proxy environment variables from being used. + + A TransportRequest must contain enough information to uniquely + identify any pkg.client.publisher.TransportRepoURI - in + particular, it must contain all fields used by + TransportRepoURI.key() which is currently the (url, proxy) + tuple, and is used as the key when recording/retrieving + transport statistics.""" + + self.url = url + self.filepath = filepath + self.writefunc = writefunc + self.hdrfunc = hdrfunc + self.header = header + self.data = data + self.httpmethod = httpmethod + self.progclass = progclass + self.progtrack = progtrack + self.progfunc = progfunc + self.repourl = repourl + self.sslcert = sslcert + self.sslkey = sslkey + self.compressible = compressible + self.uuid = uuid + self.read_fobj = read_fobj + self.read_filepath = read_filepath + self.failonerror = failonerror + self.proxy = proxy + self.runtime_proxy = runtime_proxy + self.system = system + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/transport/exception.py b/src/modules/client/transport/exception.py index ef532f60b..ee69fda51 100644 --- a/src/modules/client/transport/exception.py +++ b/src/modules/client/transport/exception.py @@ -30,19 +30,28 @@ from functools import total_ordering from six.moves import http_client -retryable_http_errors = set((http_client.REQUEST_TIMEOUT, http_client.BAD_GATEWAY, - http_client.GATEWAY_TIMEOUT, http_client.NOT_FOUND)) -retryable_file_errors = set((pycurl.E_FILE_COULDNT_READ_FILE, errno.EAGAIN, - errno.ENOENT)) +retryable_http_errors = set( + ( + http_client.REQUEST_TIMEOUT, + http_client.BAD_GATEWAY, + http_client.GATEWAY_TIMEOUT, + http_client.NOT_FOUND, + ) +) +retryable_file_errors = set( + (pycurl.E_FILE_COULDNT_READ_FILE, errno.EAGAIN, errno.ENOENT) +) import pkg.client.api_errors as api_errors # Errors that stats.py may include in a decay-able error rate decayable_http_errors = set((http_client.NOT_FOUND,)) -decayable_file_errors = set((pycurl.E_FILE_COULDNT_READ_FILE, 
errno.EAGAIN, - errno.ENOENT)) -decayable_pycurl_errors = set((pycurl.E_OPERATION_TIMEOUTED, - pycurl.E_COULDNT_CONNECT)) +decayable_file_errors = set( + (pycurl.E_FILE_COULDNT_READ_FILE, errno.EAGAIN, errno.ENOENT) +) +decayable_pycurl_errors = set( + (pycurl.E_OPERATION_TIMEOUTED, pycurl.E_COULDNT_CONNECT) +) # Different protocols may have different retryable errors. Map proto # to set of retryable errors. @@ -59,439 +68,459 @@ "https": decayable_http_errors, } -proto_code_map = { - "http": http_client.responses, - "https": http_client.responses -} +proto_code_map = {"http": http_client.responses, "https": http_client.responses} + +retryable_pycurl_errors = set( + ( + pycurl.E_COULDNT_CONNECT, + pycurl.E_PARTIAL_FILE, + pycurl.E_OPERATION_TIMEOUTED, + pycurl.E_GOT_NOTHING, + pycurl.E_SEND_ERROR, + pycurl.E_RECV_ERROR, + pycurl.E_COULDNT_RESOLVE_HOST, + pycurl.E_TOO_MANY_REDIRECTS, + pycurl.E_BAD_CONTENT_ENCODING, + ) +) -retryable_pycurl_errors = set((pycurl.E_COULDNT_CONNECT, pycurl.E_PARTIAL_FILE, - pycurl.E_OPERATION_TIMEOUTED, pycurl.E_GOT_NOTHING, pycurl.E_SEND_ERROR, - pycurl.E_RECV_ERROR, pycurl.E_COULDNT_RESOLVE_HOST, - pycurl.E_TOO_MANY_REDIRECTS, pycurl.E_BAD_CONTENT_ENCODING)) class TransportException(api_errors.TransportError): - """Base class for various exceptions thrown by code in transport - package.""" + """Base class for various exceptions thrown by code in transport + package.""" - def __init__(self): - self.count = 1 - self.decayable = False - self.retryable = False + def __init__(self): + self.count = 1 + self.decayable = False + self.retryable = False class TransportOperationError(TransportException): - """Used when transport operations fail for miscellaneous reasons.""" + """Used when transport operations fail for miscellaneous reasons.""" - def __init__(self, data): - TransportException.__init__(self) - self.data = data + def __init__(self, data): + TransportException.__init__(self) + self.data = data - def __str__(self): - return str(self.data) + def __str__(self): + return str(self.data) class TransportFailures(TransportException): - """This exception encapsulates multiple transport exceptions.""" - - # - # This class is a subclass of TransportException so that calling - # code can reasonably 'except TransportException' and get either - # a single-valued or in this case a multi-valued instance. - # - def __init__(self, pfmri=None): - TransportException.__init__(self) - self.exceptions = [] - self.pfmri = pfmri - - def append(self, exc): - found = False - - assert isinstance(exc, TransportException) - for x in self.exceptions: - if x == exc: - x.count += 1 - found = True - break - - if not found: - self.exceptions.append(exc) - - def extend(self, exc_list): - for exc in exc_list: - self.append(exc) - - def __str__(self): - if len(self.exceptions) == 0: - return "[no errors accumulated]" - - s = "" - if self.pfmri: - s += "{0}\n".format(self.pfmri) - - for i, x in enumerate(self.exceptions): - s += " " - if len(self.exceptions) > 1: - s += "{0:d}: ".format(i + 1) - s += str(x) - if x.count > 1: - s += _(" (happened {0:d} times)").format( - x.count) - s += "\n" - s += self._str_autofix() - return s - - def __len__(self): - return len(self.exceptions) + """This exception encapsulates multiple transport exceptions.""" + + # + # This class is a subclass of TransportException so that calling + # code can reasonably 'except TransportException' and get either + # a single-valued or in this case a multi-valued instance. 
+ # + def __init__(self, pfmri=None): + TransportException.__init__(self) + self.exceptions = [] + self.pfmri = pfmri + + def append(self, exc): + found = False + + assert isinstance(exc, TransportException) + for x in self.exceptions: + if x == exc: + x.count += 1 + found = True + break + + if not found: + self.exceptions.append(exc) + + def extend(self, exc_list): + for exc in exc_list: + self.append(exc) + + def __str__(self): + if len(self.exceptions) == 0: + return "[no errors accumulated]" + + s = "" + if self.pfmri: + s += "{0}\n".format(self.pfmri) + + for i, x in enumerate(self.exceptions): + s += " " + if len(self.exceptions) > 1: + s += "{0:d}: ".format(i + 1) + s += str(x) + if x.count > 1: + s += _(" (happened {0:d} times)").format(x.count) + s += "\n" + s += self._str_autofix() + return s + + def __len__(self): + return len(self.exceptions) @total_ordering class TransportProtoError(TransportException): - """Raised when errors occur in the transport protocol.""" - - def __init__(self, proto, code=None, url=None, reason=None, - repourl=None, request=None, uuid=None, details=None, proxy=None): - TransportException.__init__(self) - self.proto = proto - self.code = code - self.url = url - self.urlstem = repourl - self.reason = reason - self.request = request - self.decayable = self.code in decayable_proto_errors[self.proto] - self.retryable = self.code in retryable_proto_errors[self.proto] - self.uuid = uuid - self.details = details - self.proxy = proxy - self.codename = "" - codenames = [ - name - for name in vars(pycurl) - if len(name) > 1 and name[:2] == "E_" and \ - getattr(pycurl, name) == code - ] - if len(codenames) >= 1: - self.codename = codenames[0] - - def __str__(self): - s = "{0} protocol error".format(self.proto) - if self.code and self.codename: - s += ": code: {0} ({1:d})".format( - self.codename, self.code) - elif self.code: - s += ": Unknown error code: {0:d}".format(self.code) - if self.reason: - s += " reason: {0}".format(self.reason) - if self.url: - s += "\nURL: '{0}'".format(self.url) - elif self.urlstem: - # If the location of the resource isn't known because - # the error was encountered while attempting to find - # the location, then at least knowing where it was - # looking will be helpful. 
- s += "\nRepository URL: '{0}'.".format(self.urlstem) - if self.proxy: - s += "\nProxy: '{0}'".format(self.proxy) - if self.details: - s +="\nAdditional Details:\n{0}".format(self.details) - return s - - def key(self): - return (self.proto, self.code, self.url, self.details, - self.reason) - - def __eq__(self, other): - if not isinstance(other, TransportProtoError): - return False - return self.key() == other.key() - - def __lt__(self, other): - if not isinstance(other, TransportProtoError): - return True - return self.key() < other.key() - - def __hash__(self): - return hash(self.key()) + """Raised when errors occur in the transport protocol.""" + + def __init__( + self, + proto, + code=None, + url=None, + reason=None, + repourl=None, + request=None, + uuid=None, + details=None, + proxy=None, + ): + TransportException.__init__(self) + self.proto = proto + self.code = code + self.url = url + self.urlstem = repourl + self.reason = reason + self.request = request + self.decayable = self.code in decayable_proto_errors[self.proto] + self.retryable = self.code in retryable_proto_errors[self.proto] + self.uuid = uuid + self.details = details + self.proxy = proxy + self.codename = "" + codenames = [ + name + for name in vars(pycurl) + if len(name) > 1 + and name[:2] == "E_" + and getattr(pycurl, name) == code + ] + if len(codenames) >= 1: + self.codename = codenames[0] + + def __str__(self): + s = "{0} protocol error".format(self.proto) + if self.code and self.codename: + s += ": code: {0} ({1:d})".format(self.codename, self.code) + elif self.code: + s += ": Unknown error code: {0:d}".format(self.code) + if self.reason: + s += " reason: {0}".format(self.reason) + if self.url: + s += "\nURL: '{0}'".format(self.url) + elif self.urlstem: + # If the location of the resource isn't known because + # the error was encountered while attempting to find + # the location, then at least knowing where it was + # looking will be helpful. 
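For reference, the retryable and decayable flags assigned in __init__ above are simple set-membership tests against the per-protocol tables defined at the top of this module; with the sets shown in this diff an HTTP 404 is both retryable and decayable, and TransportFailures folds repeated, equal errors into one entry with a count. A small self-contained sketch (the URL is a placeholder):

    from six.moves import http_client
    import pkg.client.transport.exception as tx

    err = tx.TransportProtoError(
        "http", code=http_client.NOT_FOUND, url="http://pkg.example.com/missing"
    )
    assert err.retryable and err.decayable    # NOT_FOUND appears in both http sets

    failures = tx.TransportFailures()
    failures.append(err)
    failures.append(tx.TransportProtoError(
        "http", code=http_client.NOT_FOUND, url="http://pkg.example.com/missing"
    ))
    assert len(failures) == 1                 # equal errors are merged...
    assert failures.exceptions[0].count == 2  # ...and counted instead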
+ s += "\nRepository URL: '{0}'.".format(self.urlstem) + if self.proxy: + s += "\nProxy: '{0}'".format(self.proxy) + if self.details: + s += "\nAdditional Details:\n{0}".format(self.details) + return s + + def key(self): + return (self.proto, self.code, self.url, self.details, self.reason) + + def __eq__(self, other): + if not isinstance(other, TransportProtoError): + return False + return self.key() == other.key() + + def __lt__(self, other): + if not isinstance(other, TransportProtoError): + return True + return self.key() < other.key() + + def __hash__(self): + return hash(self.key()) @total_ordering class TransportFrameworkError(TransportException): - """Raised when errors occur in the transport framework.""" - - def __init__(self, code, url=None, reason=None, repourl=None, - uuid=None, proxy=None): - TransportException.__init__(self) - self.code = code - self.url = url - self.urlstem = repourl - self.reason = reason - self.decayable = self.code in decayable_pycurl_errors - self.retryable = self.code in retryable_pycurl_errors - self.uuid = uuid - self.proxy = proxy - self.codename = "" - codenames = [ - name - for name in vars(pycurl) - if len(name) > 1 and name[:2] == "E_" and \ - getattr(pycurl, name) == code - ] - if len(codenames) >= 1: - self.codename = codenames[0] - - def __str__(self): - if self.codename: - s = "Framework error: code: {0} ({1:d})".format( - self.codename, self.code) - else: - s = "Unkown Framework error code: {0:d}".format( - self.code) - if self.reason: - s += " reason: {0}".format(self.reason) - if self.url: - s += "\nURL: '{0}'".format(self.url) - if self.proxy: - s += "\nProxy: '{0}'".format(self.proxy) - s += self._str_autofix() - return s - - def key(self): - return (self.code, self.url, self.proxy, self.reason) - - def __eq__(self, other): - if not isinstance(other, TransportFrameworkError): - return False - return self.key() == other.key() - - def __lt__(self, other): - if not isinstance(other, TransportFrameworkError): - return True - return self.key() < other.key() - - def __hash__(self): - return hash(self.key()) + """Raised when errors occur in the transport framework.""" + + def __init__( + self, code, url=None, reason=None, repourl=None, uuid=None, proxy=None + ): + TransportException.__init__(self) + self.code = code + self.url = url + self.urlstem = repourl + self.reason = reason + self.decayable = self.code in decayable_pycurl_errors + self.retryable = self.code in retryable_pycurl_errors + self.uuid = uuid + self.proxy = proxy + self.codename = "" + codenames = [ + name + for name in vars(pycurl) + if len(name) > 1 + and name[:2] == "E_" + and getattr(pycurl, name) == code + ] + if len(codenames) >= 1: + self.codename = codenames[0] + + def __str__(self): + if self.codename: + s = "Framework error: code: {0} ({1:d})".format( + self.codename, self.code + ) + else: + s = "Unkown Framework error code: {0:d}".format(self.code) + if self.reason: + s += " reason: {0}".format(self.reason) + if self.url: + s += "\nURL: '{0}'".format(self.url) + if self.proxy: + s += "\nProxy: '{0}'".format(self.proxy) + s += self._str_autofix() + return s + + def key(self): + return (self.code, self.url, self.proxy, self.reason) + + def __eq__(self, other): + if not isinstance(other, TransportFrameworkError): + return False + return self.key() == other.key() + + def __lt__(self, other): + if not isinstance(other, TransportFrameworkError): + return True + return self.key() < other.key() + + def __hash__(self): + return hash(self.key()) @total_ordering class 
TransportStallError(TransportException): - """Raised when stalls occur in the transport framework.""" - - def __init__(self, url=None, repourl=None, uuid=None, proxy=None): - TransportException.__init__(self) - self.url = url - self.urlstem = repourl - self.retryable = True - self.uuid = uuid - self.proxy = proxy - - def __str__(self): - s = "Framework stall" - if self.url or self.proxy: - s += ":" - if self.url: - s += "\nURL: '{0}'".format(self.url) - if self.proxy: - s += "\nProxy: '{0}'".format(self.proxy) - return s - - def key(self): - return (self.url, self.proxy) - - def __eq__(self, other): - if not isinstance(other, TransportStallError): - return False - return self.key() == other.key() - - def __lt__(self, other): - if not isinstance(other, TransportStallError): - return True - return self.key() < other.key() - - def __hash__(self): - return hash(self.key()) + """Raised when stalls occur in the transport framework.""" + + def __init__(self, url=None, repourl=None, uuid=None, proxy=None): + TransportException.__init__(self) + self.url = url + self.urlstem = repourl + self.retryable = True + self.uuid = uuid + self.proxy = proxy + + def __str__(self): + s = "Framework stall" + if self.url or self.proxy: + s += ":" + if self.url: + s += "\nURL: '{0}'".format(self.url) + if self.proxy: + s += "\nProxy: '{0}'".format(self.proxy) + return s + + def key(self): + return (self.url, self.proxy) + + def __eq__(self, other): + if not isinstance(other, TransportStallError): + return False + return self.key() == other.key() + + def __lt__(self, other): + if not isinstance(other, TransportStallError): + return True + return self.key() < other.key() + + def __hash__(self): + return hash(self.key()) @total_ordering class TransferContentException(TransportException): - """Raised when there are problems downloading the requested content.""" - - def __init__(self, url, reason=None, proxy=None): - TransportException.__init__(self) - self.url = url - self.reason = reason - self.retryable = True - self.proxy = proxy - - def __str__(self): - if self.proxy: - s = "Transfer from '{0}' via proxy '{1}' failed".format( - self.url, self.proxy) - else: - s = "Transfer from '{0}' failed".format(self.url) - if self.reason: - s += ": {0}".format(self.reason) - s += "." - return s - - def key(self): - return (self.url, self.proxy, self.reason) - - def __eq__(self, other): - if not isinstance(other, TransferContentException): - return False - return self.key() == other.key() - - def __lt__(self, other): - if not isinstance(other, TransferContentException): - return True - return self.key() < other.key() - - def __hash__(self): - return hash(self.key()) + """Raised when there are problems downloading the requested content.""" + + def __init__(self, url, reason=None, proxy=None): + TransportException.__init__(self) + self.url = url + self.reason = reason + self.retryable = True + self.proxy = proxy + + def __str__(self): + if self.proxy: + s = "Transfer from '{0}' via proxy '{1}' failed".format( + self.url, self.proxy + ) + else: + s = "Transfer from '{0}' failed".format(self.url) + if self.reason: + s += ": {0}".format(self.reason) + s += "." 
+ return s + + def key(self): + return (self.url, self.proxy, self.reason) + + def __eq__(self, other): + if not isinstance(other, TransferContentException): + return False + return self.key() == other.key() + + def __lt__(self, other): + if not isinstance(other, TransferContentException): + return True + return self.key() < other.key() + + def __hash__(self): + return hash(self.key()) @total_ordering class InvalidContentException(TransportException): - """Raised when the content's hash/chash doesn't verify, or the - content is received in an unreadable format.""" - - def __init__(self, path=None, reason=None, size=0, url=None, proxy=None): - TransportException.__init__(self) - self.path = path - self.reason = reason - self.size = size - self.retryable = True - self.url = url - self.proxy = proxy - - def __str__(self): - s = "Invalid content" - if self.path: - s += "path {0}".format(self.path) - if self.reason: - s += ": {0}.".format(self.reason) - if self.url: - s += "\nURL: {0}".format(self.url) - if self.proxy: - s += "\nProxy: {0}".format(self.proxy) - return s - - def key(self): - return (self.path, self.reason, self.proxy, self.url) - - def __eq__(self, other): - if not isinstance(other, InvalidContentException): - return False - return self.key() == other.key() - - def __lt__(self, other): - if not isinstance(other, InvalidContentException): - return True - return self.key() < other.key() - - def __hash__(self): - return hash(self.key()) + """Raised when the content's hash/chash doesn't verify, or the + content is received in an unreadable format.""" + + def __init__(self, path=None, reason=None, size=0, url=None, proxy=None): + TransportException.__init__(self) + self.path = path + self.reason = reason + self.size = size + self.retryable = True + self.url = url + self.proxy = proxy + + def __str__(self): + s = "Invalid content" + if self.path: + s += "path {0}".format(self.path) + if self.reason: + s += ": {0}.".format(self.reason) + if self.url: + s += "\nURL: {0}".format(self.url) + if self.proxy: + s += "\nProxy: {0}".format(self.proxy) + return s + + def key(self): + return (self.path, self.reason, self.proxy, self.url) + + def __eq__(self, other): + if not isinstance(other, InvalidContentException): + return False + return self.key() == other.key() + + def __lt__(self, other): + if not isinstance(other, InvalidContentException): + return True + return self.key() < other.key() + + def __hash__(self): + return hash(self.key()) @total_ordering class PkgProtoError(TransportException): - """Raised when the pkg protocol doesn't behave according to - specification. This is different than TransportProtoError, which - deals with the L7 protocols that we can use to perform a pkg(7) - transport operation. Although it doesn't exist, this is essentially - a L8 error, since our pkg protocol is built on top of application - level protocols. 
The Framework errors deal with L3-6 errors.""" - - def __init__(self, url, operation=None, version=None, reason=None, - proxy=None): - TransportException.__init__(self) - self.url = url - self.reason = reason - self.operation = operation - self.version = version - self.proxy = proxy - - def __str__(self): - if self.proxy: - s = "Invalid pkg(7) response from {0} (proxy {1})".format( - self.url, self.proxy) - else: - s = "Invalid pkg(7) response from {0}".format(self.url) - if self.operation: - s += ": Attempting operation '{0}'".format(self.operation) - if self.version is not None: - s += " version {0}".format(self.version) - if self.reason: - s += ":\n{0}".format(self.reason) - return s - - def key(self): - return (self.url, self.operation, self.version, - self.proxy, self.reason) - - def __eq__(self, other): - if not isinstance(other, PkgProtoError): - return False - return self.key() == other.key() - - def __lt__(self, other): - if not isinstance(other, PkgProtoError): - return True - return self.key() < other.key() - - def __hash__(self): - return hash(self.key()) + """Raised when the pkg protocol doesn't behave according to + specification. This is different than TransportProtoError, which + deals with the L7 protocols that we can use to perform a pkg(7) + transport operation. Although it doesn't exist, this is essentially + a L8 error, since our pkg protocol is built on top of application + level protocols. The Framework errors deal with L3-6 errors.""" + + def __init__( + self, url, operation=None, version=None, reason=None, proxy=None + ): + TransportException.__init__(self) + self.url = url + self.reason = reason + self.operation = operation + self.version = version + self.proxy = proxy + + def __str__(self): + if self.proxy: + s = "Invalid pkg(7) response from {0} (proxy {1})".format( + self.url, self.proxy + ) + else: + s = "Invalid pkg(7) response from {0}".format(self.url) + if self.operation: + s += ": Attempting operation '{0}'".format(self.operation) + if self.version is not None: + s += " version {0}".format(self.version) + if self.reason: + s += ":\n{0}".format(self.reason) + return s + + def key(self): + return (self.url, self.operation, self.version, self.proxy, self.reason) + + def __eq__(self, other): + if not isinstance(other, PkgProtoError): + return False + return self.key() == other.key() + + def __lt__(self, other): + if not isinstance(other, PkgProtoError): + return True + return self.key() < other.key() + + def __hash__(self): + return hash(self.key()) @total_ordering class ExcessiveTransientFailure(TransportException): - """Raised when the transport encounters too many retryable errors - at a single endpoint.""" - - def __init__(self, url, count, proxy=None): - TransportException.__init__(self) - self.url = url - self.count = count - self.retryable = True - self.failures = None - self.success = None - self.proxy = proxy - - def __str__(self): - s = "Too many retryable errors encountered during transfer.\n" - if self.url: - s += "URL: {0} ".format(self.url) - if self.proxy: - s += "Proxy: {0}".format(self.proxy) - if self.count: - s += "Count: {0} ".format(self.count) - return s - - def key(self): - return (self.url, self.proxy, self.count) - - def __eq__(self, other): - if not isinstance(other, ExcessiveTransientFailure): - return False - return self.key() == other.key() - - def __lt__(self, other): - if not isinstance(other, ExcessiveTransientFailure): - return True - return self.key() < other.key() - - def __hash__(self): - return hash(self.key()) + """Raised 
when the transport encounters too many retryable errors + at a single endpoint.""" + + def __init__(self, url, count, proxy=None): + TransportException.__init__(self) + self.url = url + self.count = count + self.retryable = True + self.failures = None + self.success = None + self.proxy = proxy + + def __str__(self): + s = "Too many retryable errors encountered during transfer.\n" + if self.url: + s += "URL: {0} ".format(self.url) + if self.proxy: + s += "Proxy: {0}".format(self.proxy) + if self.count: + s += "Count: {0} ".format(self.count) + return s + + def key(self): + return (self.url, self.proxy, self.count) + + def __eq__(self, other): + if not isinstance(other, ExcessiveTransientFailure): + return False + return self.key() == other.key() + + def __lt__(self, other): + if not isinstance(other, ExcessiveTransientFailure): + return True + return self.key() < other.key() + + def __hash__(self): + return hash(self.key()) class mDNSException(TransportException): - """Used when mDNS operations fail.""" + """Used when mDNS operations fail.""" + + def __init__(self, errstr): + TransportException.__init__(self) + self.err = errstr - def __init__(self, errstr): - TransportException.__init__(self) - self.err = errstr + def __str__(self): + return self.err - def __str__(self): - return self.err # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/transport/fileobj.py b/src/modules/client/transport/fileobj.py index 9363e940b..28070e1e2 100644 --- a/src/modules/client/transport/fileobj.py +++ b/src/modules/client/transport/fileobj.py @@ -30,375 +30,373 @@ from pkg.misc import DummyLock, force_str + class StreamingFileObj(object): + def __init__(self, url, engine, ccancel=None): + """Create a streaming file object that wraps around a + transport engine. This is only necessary if the underlying + transport doesn't have its own streaming interface and the + repo operation needs a streaming response.""" + + self.__buf = b"" + self.__url = url + self.__engine = engine + self.__data_callback_invoked = False + self.__headers_arrived = False + self.__httpmsg = None + self.__headers = {} + self.__done = False + self.__check_cancelation = ccancel + self.__lock = DummyLock() + self.__uuid = uuidm.uuid4().int + # Free buffer on exception. Set to False if caller may + # read buffer after exception. Caller should call close() + # to cleanup afterwards. + self.free_buffer = True + + def __del__(self): + release = False + try: + if not self.__done: + if not self.__lock._is_owned(): + self.__lock.acquire() + release = True + self.__engine.orphaned_request(self.__url, self.__uuid) + except AttributeError: + # Ignore attribute error if instance is deleted + # before initialization completes. + pass + finally: + if release: + self.__lock.release() + + # File object methods - def __init__(self, url, engine, ccancel=None): - """Create a streaming file object that wraps around a - transport engine. This is only necessary if the underlying - transport doesn't have its own streaming interface and the - repo operation needs a streaming response.""" - - self.__buf = b"" - self.__url = url - self.__engine = engine - self.__data_callback_invoked = False - self.__headers_arrived = False - self.__httpmsg = None - self.__headers = {} - self.__done = False - self.__check_cancelation = ccancel - self.__lock = DummyLock() - self.__uuid = uuidm.uuid4().int - # Free buffer on exception. Set to False if caller may - # read buffer after exception. 
Caller should call close() - # to cleanup afterwards. - self.free_buffer = True - - def __del__(self): - release = False - try: - if not self.__done: - if not self.__lock._is_owned(): - self.__lock.acquire() - release = True - self.__engine.orphaned_request(self.__url, - self.__uuid) - except AttributeError: - # Ignore attribute error if instance is deleted - # before initialization completes. - pass - finally: - if release: - self.__lock.release() - - # File object methods - - def close(self): - # Caller shouldn't hold lock when calling this method - assert not self.__lock._is_owned() - - if not self.__done: - self.__lock.acquire() - try: - self.__engine.remove_request(self.__url, - self.__uuid) - self.__done = True - finally: - self.__lock.release() - self.__buf = b"" - self.__engine = None - self.__url = None - - def flush(self): - """flush the buffer. Since this supports read, but - not write, this is a noop.""" - return - - def read(self, size=-1): - """Read size bytes from the remote connection. - If size isn't specified, read all of the data from - the remote side.""" - - # Caller shouldn't hold lock when calling this method - assert not self.__lock._is_owned() - - if size < 0: - while self.__fill_buffer(): - # just fill the buffer - pass - curdata = self.__buf - self.__buf = b"" - return curdata - else: - curdata = self.__buf - datalen = len(curdata) - if datalen >= size: - self.__buf = curdata[size:] - return curdata[:size] - while self.__fill_buffer(): - datalen = len(self.__buf) - if datalen >= size: - break - - curdata = self.__buf - datalen = len(curdata) - if datalen >= size: - self.__buf = curdata[size:] - return curdata[:size] - - self.__buf = b"" - return curdata - - def readline(self, size=-1): - """Read a line from the remote host. If size is - specified, read to newline or size, whichever is smaller. - We force the return value to be str here since the caller - expect str.""" - - # Caller shouldn't hold lock when calling this method - assert not self.__lock._is_owned() - - if size < 0: - curdata = self.__buf - newline = curdata.find(b"\n") - if newline >= 0: - newline += 1 - self.__buf = curdata[newline:] - return force_str(curdata[:newline]) - while self.__fill_buffer(): - newline = self.__buf.find(b"\n") - if newline >= 0: - break - - curdata = self.__buf - newline = curdata.find(b"\n") - if newline >= 0: - newline += 1 - self.__buf = curdata[newline:] - return force_str(curdata[:newline]) - self.__buf = b"" - return force_str(curdata) - else: - curdata = self.__buf - newline = curdata.find(b"\n", 0, size) - datalen = len(curdata) - if newline >= 0: - newline += 1 - self.__buf = curdata[newline:] - return force_str(curdata[:newline]) - if datalen >= size: - self.__buf = curdata[size:] - return force_str(curdata[:size]) - while self.__fill_buffer(): - newline = self.__buf.find(b"\n", 0, size) - datalen = len(self.__buf) - if newline >= 0: - break - if datalen >= size: - break - - curdata = self.__buf - newline = curdata.find(b"\n", 0, size) - datalen = len(curdata) - if newline >= 0: - newline += 1 - self.__buf = curdata[newline:] - return force_str(curdata[:newline]) - if datalen >= size: - self.__buf = curdata[size:] - return force_str(curdata[:size]) - self.__buf = b"" - return force_str(curdata) - - def readlines(self, sizehint=0): - """Read lines from the remote host, returning an - array of the lines that were read. 
sizehint specifies - an approximate size, in bytes, of the total amount of data, - as lines, that should be returned to the caller.""" - - # Caller shouldn't hold lock when calling this method - assert not self.__lock._is_owned() - - read = 0 - lines = [] - while True: - l = self.readline() - if not l: - break - lines.append(l) - read += len(l) - if sizehint and read >= sizehint: - break - - return lines - - def write(self, data): - raise NotImplementedError - - def writelines(self, llist): - raise NotImplementedError - - # Methods that access the callbacks - - def get_write_func(self): - return self.__write_callback - - def get_header_func(self): - return self.__header_callback - - def get_progress_func(self): - return self.__progress_callback - - # Miscellaneous accessors - - def set_lock(self, lock): - self.__lock = lock - - @property - def uuid(self): - return self.__uuid - - # Header and message methods - - @property - def headers(self): - if not self.__headers_arrived: - self.__fill_headers() - return self.__headers - - def get_http_message(self): - """Return the status message that may be included - with a numerical HTTP response code. Not all HTTP - implementations are guaranteed to return this value. - In some cases it may be None.""" - - return self.__httpmsg - - def getheader(self, hdr, default): - """Return the HTTP header named hdr. If the hdr - isn't present, return default value instead.""" - - if not self.__headers_arrived: - self.__fill_headers() - - return self.__headers.get(hdr.lower(), default) - - def _prime(self): - """Used by the underlying transport before handing this - object off to other layers. It ensures that the object's - creator can catch errors that occur at connection time. - All callers must still catch transport exceptions, however.""" - - self.__fill_buffer(1) - - # Iterator methods - - def __iter__(self): - return self - - def __next__(self): - line = self.readline() - if not line: - raise StopIteration - return line - - next = __next__ - - # Private methods - - def __fill_buffer(self, size=-1): - """Call engine.run() to fill the file object's buffer. - Read until we might block. If size is specified, stop - once we get at least size bytes, or might block, - whichever comes first.""" - - engine = self.__engine - - if not engine: - return False - - self.__lock.acquire() - while 1: - if self.__done: - self.__lock.release() - return False - elif not engine.pending: - # nothing pending means no more transfer - self.__done = True - s = engine.check_status([self.__url]) - if s: - # Cleanup prior to raising exception - self.__lock.release() - if self.free_buffer: - self.close() - raise s[0] - - self.__lock.release() - return False - - try: - engine.run() - except tx.ExcessiveTransientFailure as ex: - s = engine.check_status([self.__url]) - ex.failures = s - self.__lock.release() - if self.free_buffer: - self.close() - raise - except: - # Cleanup and close, if exception - # raised by run. - self.__lock.release() - if self.free_buffer: - self.close() - raise - - if size > 0 and len(self.__buf) < size: - # loop if we need more data in the buffer - continue - else: - # break out of this loop - break + def close(self): + # Caller shouldn't hold lock when calling this method + assert not self.__lock._is_owned() + if not self.__done: + self.__lock.acquire() + try: + self.__engine.remove_request(self.__url, self.__uuid) + self.__done = True + finally: self.__lock.release() - return True - - def __fill_headers(self): - """Run the transport until headers arrive. 
When the data - callback gets invoked, all headers have arrived. The - alternate scenario is when no data arrives, but the server - isn't providing more input isi over the network. In that case, - the client either received just headers, or had the transfer - close unexpectedly.""" - - while not self.__data_callback_invoked: - if not self.__fill_buffer(): - # We hit this case if we get headers - # but no data. - break - - self.__headers_arrived = True - - def __progress_callback(self, dltot, dlcur, ultot, ulcur): - """Called by pycurl/libcurl framework to update - progress tracking.""" - - if self.__check_cancelation and self.__check_cancelation(): - return -1 - - return 0 - - def __write_callback(self, data): - """A callback given to transport engine that writes data - into a buffer in this object.""" - - if not self.__data_callback_invoked: - self.__data_callback_invoked = True - - # We don't force data to str here because data could be from a - # gizpped file, which contains gzip magic number that can't be - # decoded by 'utf-8'. - self.__buf = self.__buf + data - - def __header_callback(self, data): - """A callback given to the transport engine. It reads header - information from the transport. This function saves - the message from the http response, as well as a dictionary - of headers that it can parse.""" - - if data.startswith(b"HTTP/"): - rtup = data.split(None, 2) - try: - self.__httpmsg = rtup[2] - except IndexError: - pass - - elif data.find(b":") > -1: - k, v = data.split(b":", 1) - if v: - # convert to str as early as we can - self.__headers[force_str(k.lower())] = \ - force_str(v.strip()) + self.__buf = b"" + self.__engine = None + self.__url = None + + def flush(self): + """flush the buffer. Since this supports read, but + not write, this is a noop.""" + return + + def read(self, size=-1): + """Read size bytes from the remote connection. + If size isn't specified, read all of the data from + the remote side.""" + + # Caller shouldn't hold lock when calling this method + assert not self.__lock._is_owned() + + if size < 0: + while self.__fill_buffer(): + # just fill the buffer + pass + curdata = self.__buf + self.__buf = b"" + return curdata + else: + curdata = self.__buf + datalen = len(curdata) + if datalen >= size: + self.__buf = curdata[size:] + return curdata[:size] + while self.__fill_buffer(): + datalen = len(self.__buf) + if datalen >= size: + break + + curdata = self.__buf + datalen = len(curdata) + if datalen >= size: + self.__buf = curdata[size:] + return curdata[:size] + + self.__buf = b"" + return curdata + + def readline(self, size=-1): + """Read a line from the remote host. If size is + specified, read to newline or size, whichever is smaller. 
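# An illustrative, minimal sketch of the buffered line-reading approach used
# by readline() here: keep a bytes buffer, look for a newline, and keep
# calling a fill function until one arrives or the stream is exhausted. The
# names buffered_readline() and fill() are hypothetical, not part of this
# module.
def buffered_readline(buf, fill):
    while True:
        nl = buf.find(b"\n")
        if nl >= 0:
            # Return the line (including the newline) and the leftover bytes.
            return buf[:nl + 1], buf[nl + 1:]
        chunk = fill()
        if not chunk:
            # No more data; whatever is buffered is the final partial line.
            return buf, b""
        buf += chunk

chunks = iter([b"demo of a ", b"streaming line\nleftover"])
line, rest = buffered_readline(b"", lambda: next(chunks, b""))
# line == b"demo of a streaming line\n", rest == b"leftover"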
+ We force the return value to be str here since the caller + expect str.""" + + # Caller shouldn't hold lock when calling this method + assert not self.__lock._is_owned() + + if size < 0: + curdata = self.__buf + newline = curdata.find(b"\n") + if newline >= 0: + newline += 1 + self.__buf = curdata[newline:] + return force_str(curdata[:newline]) + while self.__fill_buffer(): + newline = self.__buf.find(b"\n") + if newline >= 0: + break + + curdata = self.__buf + newline = curdata.find(b"\n") + if newline >= 0: + newline += 1 + self.__buf = curdata[newline:] + return force_str(curdata[:newline]) + self.__buf = b"" + return force_str(curdata) + else: + curdata = self.__buf + newline = curdata.find(b"\n", 0, size) + datalen = len(curdata) + if newline >= 0: + newline += 1 + self.__buf = curdata[newline:] + return force_str(curdata[:newline]) + if datalen >= size: + self.__buf = curdata[size:] + return force_str(curdata[:size]) + while self.__fill_buffer(): + newline = self.__buf.find(b"\n", 0, size) + datalen = len(self.__buf) + if newline >= 0: + break + if datalen >= size: + break + + curdata = self.__buf + newline = curdata.find(b"\n", 0, size) + datalen = len(curdata) + if newline >= 0: + newline += 1 + self.__buf = curdata[newline:] + return force_str(curdata[:newline]) + if datalen >= size: + self.__buf = curdata[size:] + return force_str(curdata[:size]) + self.__buf = b"" + return force_str(curdata) + + def readlines(self, sizehint=0): + """Read lines from the remote host, returning an + array of the lines that were read. sizehint specifies + an approximate size, in bytes, of the total amount of data, + as lines, that should be returned to the caller.""" + + # Caller shouldn't hold lock when calling this method + assert not self.__lock._is_owned() + + read = 0 + lines = [] + while True: + l = self.readline() + if not l: + break + lines.append(l) + read += len(l) + if sizehint and read >= sizehint: + break + + return lines + + def write(self, data): + raise NotImplementedError + + def writelines(self, llist): + raise NotImplementedError + + # Methods that access the callbacks + + def get_write_func(self): + return self.__write_callback + + def get_header_func(self): + return self.__header_callback + + def get_progress_func(self): + return self.__progress_callback + + # Miscellaneous accessors + + def set_lock(self, lock): + self.__lock = lock + + @property + def uuid(self): + return self.__uuid + + # Header and message methods + + @property + def headers(self): + if not self.__headers_arrived: + self.__fill_headers() + return self.__headers + + def get_http_message(self): + """Return the status message that may be included + with a numerical HTTP response code. Not all HTTP + implementations are guaranteed to return this value. + In some cases it may be None.""" + + return self.__httpmsg + + def getheader(self, hdr, default): + """Return the HTTP header named hdr. If the hdr + isn't present, return default value instead.""" + + if not self.__headers_arrived: + self.__fill_headers() + + return self.__headers.get(hdr.lower(), default) + + def _prime(self): + """Used by the underlying transport before handing this + object off to other layers. It ensures that the object's + creator can catch errors that occur at connection time. 
+ All callers must still catch transport exceptions, however.""" + + self.__fill_buffer(1) + + # Iterator methods + + def __iter__(self): + return self + + def __next__(self): + line = self.readline() + if not line: + raise StopIteration + return line + + next = __next__ + + # Private methods + + def __fill_buffer(self, size=-1): + """Call engine.run() to fill the file object's buffer. + Read until we might block. If size is specified, stop + once we get at least size bytes, or might block, + whichever comes first.""" + + engine = self.__engine + + if not engine: + return False + + self.__lock.acquire() + while 1: + if self.__done: + self.__lock.release() + return False + elif not engine.pending: + # nothing pending means no more transfer + self.__done = True + s = engine.check_status([self.__url]) + if s: + # Cleanup prior to raising exception + self.__lock.release() + if self.free_buffer: + self.close() + raise s[0] + + self.__lock.release() + return False + + try: + engine.run() + except tx.ExcessiveTransientFailure as ex: + s = engine.check_status([self.__url]) + ex.failures = s + self.__lock.release() + if self.free_buffer: + self.close() + raise + except: + # Cleanup and close, if exception + # raised by run. + self.__lock.release() + if self.free_buffer: + self.close() + raise + + if size > 0 and len(self.__buf) < size: + # loop if we need more data in the buffer + continue + else: + # break out of this loop + break + + self.__lock.release() + return True + + def __fill_headers(self): + """Run the transport until headers arrive. When the data + callback gets invoked, all headers have arrived. The + alternate scenario is when no data arrives, but the server + isn't providing more input isi over the network. In that case, + the client either received just headers, or had the transfer + close unexpectedly.""" + + while not self.__data_callback_invoked: + if not self.__fill_buffer(): + # We hit this case if we get headers + # but no data. + break + + self.__headers_arrived = True + + def __progress_callback(self, dltot, dlcur, ultot, ulcur): + """Called by pycurl/libcurl framework to update + progress tracking.""" + + if self.__check_cancelation and self.__check_cancelation(): + return -1 + + return 0 + + def __write_callback(self, data): + """A callback given to transport engine that writes data + into a buffer in this object.""" + + if not self.__data_callback_invoked: + self.__data_callback_invoked = True + + # We don't force data to str here because data could be from a + # gizpped file, which contains gzip magic number that can't be + # decoded by 'utf-8'. + self.__buf = self.__buf + data + + def __header_callback(self, data): + """A callback given to the transport engine. It reads header + information from the transport. 
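# An illustrative, minimal sketch of the parsing this header callback
# performs: the status line yields the HTTP message, and "Name: value" lines
# fill a dict keyed by the lower-cased header name. parse_header_line() and
# the sample lines are hypothetical, for demonstration only.
def parse_header_line(line, headers, status):
    if line.startswith(b"HTTP/"):
        parts = line.split(None, 2)
        if len(parts) == 3:
            status["msg"] = parts[2].strip()
    elif b":" in line:
        name, value = line.split(b":", 1)
        if value:
            headers[name.lower().decode()] = value.strip().decode()

headers, status = {}, {}
for raw in (b"HTTP/1.1 200 OK\r\n", b"Content-Type: text/plain\r\n"):
    parse_header_line(raw, headers, status)
# status == {"msg": b"OK"}, headers == {"content-type": "text/plain"}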
This function saves + the message from the http response, as well as a dictionary + of headers that it can parse.""" + + if data.startswith(b"HTTP/"): + rtup = data.split(None, 2) + try: + self.__httpmsg = rtup[2] + except IndexError: + pass + + elif data.find(b":") > -1: + k, v = data.split(b":", 1) + if v: + # convert to str as early as we can + self.__headers[force_str(k.lower())] = force_str(v.strip()) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/transport/mdetect.py b/src/modules/client/transport/mdetect.py index 9abc48c04..7e5ce6f8c 100644 --- a/src/modules/client/transport/mdetect.py +++ b/src/modules/client/transport/mdetect.py @@ -32,137 +32,147 @@ import pkg.client.transport.exception as tx try: - import pybonjour + import pybonjour except (OSError, ImportError): - pass + pass else: - import select + import select class MirrorDetector(object): - """This class uses mDNS and DNS-SD to find link-local content - mirrors that may be present on the client's subnet.""" - - def __init__(self): - self._mirrors = [] - self.__timeout = 1 - self.__service = "_pkg5._tcp" - - def __contains__(self, key): - return key in self._mirrors - - def __getitem__(self, pos): - return self._mirrors[pos] - - def __iter__(self): - """Each time iterator is invoked, randomly select up to - five mirrors from the list of available mirrors.""" - - listlen = len(self._mirrors) - iterlst = random.sample(range(listlen), min(listlen, 5)) - - for v in iterlst: - yield self._mirrors[v] - - def locate(self): - """When invoked, this populates the MirrorDetector object with - URLs that name dynamically discovered content mirrors.""" - - # Clear the list of mirrors. It will be repopulated later. - self._mirrors = [] - - if not "pybonjour" in globals(): - return - - timedout = False - tval = self.__timeout - - def browse_cb(sd_hdl, flags, interface_idx, error_code, - service_name, regtype, reply_domain): - - if error_code != pybonjour.kDNSServiceErr_NoError: - return - - if not (flags & pybonjour.kDNSServiceFlagsAdd): - return - - self._resolve_server(interface_idx, error_code, - service_name, regtype, reply_domain) - - try: - sd_hdl = pybonjour.DNSServiceBrowse( - regtype=self.__service, callBack=browse_cb) - except pybonjour.BonjourError as e: - errstr = "mDNS Service Browse Failed: {0}\n".format( - e.args[0][1]) - raise tx.mDNSException(errstr) - - try: - while not timedout: - avail = select.select([sd_hdl], [], [], tval) - if sd_hdl in avail[0]: - pybonjour.DNSServiceProcessResult( - sd_hdl) - tval = 0 - else: - timedout = True - except select.error as e: - errstr = "Select failed: {0}\n".format(e.args[1]) - raise tx.mDNSException(errstr) - except pybonjour.BonjourError as e: - errstr = "mDNS Process Result failed: {0}\n".format( - e.args[0][1]) - raise tx.mDNSException(errstr) - finally: - sd_hdl.close() - - def _resolve_server(self, if_idx, ec, service_name, regtype, - reply_domain): - """Invoked to resolve mDNS information about a service that - was discovered by a Browse call.""" - - timedout = False - tval = self.__timeout - - def resolve_cb(sd_hdl, flags, interface_idx, error_code, - full_name, host_target, port, txt_record): - - if error_code != pybonjour.kDNSServiceErr_NoError: - return - - tr = pybonjour.TXTRecord.parse(txt_record) - if "url" in tr: - url = tr["url"] - if not misc.valid_pub_url(url): - return - self._mirrors.append(pub.RepositoryURI(url)) - - try: - sd_hdl = pybonjour.DNSServiceResolve(0, if_idx, - service_name, regtype, 
reply_domain, resolve_cb) - except pybonjour.BonjourError as e: - errstr = "mDNS Service Resolve Failed: {0}\n".format( - e.args[0][1]) - raise tx.mDNSException(errstr) - - try: - while not timedout: - avail = select.select([sd_hdl], [], [], tval) - if sd_hdl in avail[0]: - pybonjour.DNSServiceProcessResult( - sd_hdl) - tval = 0 - else: - timedout = True - except select.error as e: - errstr = "Select failed; {0}\n".format(e.args[1]) - raise tx.mDNSException(errstr) - except pybonjour.BonjourError as e: - errstr = "mDNS Process Result Failed: {0}\n".format( - e.args[0][1]) - raise tx.mDNSException(errstr) - finally: - sd_hdl.close() + """This class uses mDNS and DNS-SD to find link-local content + mirrors that may be present on the client's subnet.""" + + def __init__(self): + self._mirrors = [] + self.__timeout = 1 + self.__service = "_pkg5._tcp" + + def __contains__(self, key): + return key in self._mirrors + + def __getitem__(self, pos): + return self._mirrors[pos] + + def __iter__(self): + """Each time iterator is invoked, randomly select up to + five mirrors from the list of available mirrors.""" + + listlen = len(self._mirrors) + iterlst = random.sample(range(listlen), min(listlen, 5)) + + for v in iterlst: + yield self._mirrors[v] + + def locate(self): + """When invoked, this populates the MirrorDetector object with + URLs that name dynamically discovered content mirrors.""" + + # Clear the list of mirrors. It will be repopulated later. + self._mirrors = [] + + if not "pybonjour" in globals(): + return + + timedout = False + tval = self.__timeout + + def browse_cb( + sd_hdl, + flags, + interface_idx, + error_code, + service_name, + regtype, + reply_domain, + ): + if error_code != pybonjour.kDNSServiceErr_NoError: + return + + if not (flags & pybonjour.kDNSServiceFlagsAdd): + return + + self._resolve_server( + interface_idx, error_code, service_name, regtype, reply_domain + ) + + try: + sd_hdl = pybonjour.DNSServiceBrowse( + regtype=self.__service, callBack=browse_cb + ) + except pybonjour.BonjourError as e: + errstr = "mDNS Service Browse Failed: {0}\n".format(e.args[0][1]) + raise tx.mDNSException(errstr) + + try: + while not timedout: + avail = select.select([sd_hdl], [], [], tval) + if sd_hdl in avail[0]: + pybonjour.DNSServiceProcessResult(sd_hdl) + tval = 0 + else: + timedout = True + except select.error as e: + errstr = "Select failed: {0}\n".format(e.args[1]) + raise tx.mDNSException(errstr) + except pybonjour.BonjourError as e: + errstr = "mDNS Process Result failed: {0}\n".format(e.args[0][1]) + raise tx.mDNSException(errstr) + finally: + sd_hdl.close() + + def _resolve_server(self, if_idx, ec, service_name, regtype, reply_domain): + """Invoked to resolve mDNS information about a service that + was discovered by a Browse call.""" + + timedout = False + tval = self.__timeout + + def resolve_cb( + sd_hdl, + flags, + interface_idx, + error_code, + full_name, + host_target, + port, + txt_record, + ): + if error_code != pybonjour.kDNSServiceErr_NoError: + return + + tr = pybonjour.TXTRecord.parse(txt_record) + if "url" in tr: + url = tr["url"] + if not misc.valid_pub_url(url): + return + self._mirrors.append(pub.RepositoryURI(url)) + + try: + sd_hdl = pybonjour.DNSServiceResolve( + 0, if_idx, service_name, regtype, reply_domain, resolve_cb + ) + except pybonjour.BonjourError as e: + errstr = "mDNS Service Resolve Failed: {0}\n".format(e.args[0][1]) + raise tx.mDNSException(errstr) + + try: + while not timedout: + avail = select.select([sd_hdl], [], [], tval) + if sd_hdl in 
avail[0]: + pybonjour.DNSServiceProcessResult(sd_hdl) + tval = 0 + else: + timedout = True + except select.error as e: + errstr = "Select failed; {0}\n".format(e.args[1]) + raise tx.mDNSException(errstr) + except pybonjour.BonjourError as e: + errstr = "mDNS Process Result Failed: {0}\n".format(e.args[0][1]) + raise tx.mDNSException(errstr) + finally: + sd_hdl.close() + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/transport/repo.py b/src/modules/client/transport/repo.py index 652b6e3fa..8b136f81e 100644 --- a/src/modules/client/transport/repo.py +++ b/src/modules/client/transport/repo.py @@ -35,8 +35,14 @@ from email.utils import formatdate from six.moves import cStringIO, http_client -from six.moves.urllib.parse import quote, urlencode, urlsplit, urlparse, \ - urlunparse, urljoin +from six.moves.urllib.parse import ( + quote, + urlencode, + urlsplit, + urlparse, + urlunparse, + urljoin, +) from six.moves.urllib.request import url2pathname, pathname2url import pkg @@ -52,2574 +58,2916 @@ from pkg.misc import N_, compute_compressed_attrs, EmptyDict + class TransportRepo(object): - """The TransportRepo class handles transport requests. - It represents a repo, and provides the same interfaces as - the operations that are performed against a repo. Subclasses - should implement protocol specific repo modifications.""" + """The TransportRepo class handles transport requests. + It represents a repo, and provides the same interfaces as + the operations that are performed against a repo. Subclasses + should implement protocol specific repo modifications.""" + + def do_search(self, data, header=None, ccancel=None, pub=None): + """Perform a search request.""" + + raise NotImplementedError + + def get_catalog(self, ts=None, header=None, ccancel=None, pub=None): + """Get the catalog from the repo. If ts is defined, + request only changes newer than timestamp ts.""" + + raise NotImplementedError + + def get_catalog1( + self, + filelist, + destloc, + header=None, + ts=None, + progtrack=None, + pub=None, + revalidate=False, + redownload=False, + ): + """Get the files that make up the catalog components + that are listed in 'filelist'. Download the files to + the directory specified in 'destloc'. The caller + may optionally specify a dictionary with header + elements in 'header'. If a conditional get is + to be performed, 'ts' should contain a floating point + value of seconds since the epoch. + + Revalidate and redownload are used to control upstream + caching behavior, for protocols that support caching. (HTTP)""" - def do_search(self, data, header=None, ccancel=None, pub=None): - """Perform a search request.""" + raise NotImplementedError - raise NotImplementedError + def get_datastream( + self, fhash, version, header=None, ccancel=None, pub=None + ): + """Get a datastream from a repo. The name of the + file is given in fhash.""" - def get_catalog(self, ts=None, header=None, ccancel=None, pub=None): - """Get the catalog from the repo. If ts is defined, - request only changes newer than timestamp ts.""" + raise NotImplementedError - raise NotImplementedError + def get_files( + self, filelist, dest, progtrack, version, header=None, pub=None + ): + """Get multiple files from the repo at once. + The files are named by hash and supplied in filelist. + If dest is specified, download to the destination + directory that is given. 
Progtrack is a ProgressTracker""" - def get_catalog1(self, filelist, destloc, header=None, ts=None, - progtrack=None, pub=None, revalidate=False, redownload=False): - """Get the files that make up the catalog components - that are listed in 'filelist'. Download the files to - the directory specified in 'destloc'. The caller - may optionally specify a dictionary with header - elements in 'header'. If a conditional get is - to be performed, 'ts' should contain a floating point - value of seconds since the epoch. + raise NotImplementedError - Revalidate and redownload are used to control upstream - caching behavior, for protocols that support caching. (HTTP)""" + def get_manifest(self, fmri, header=None, ccancel=None, pub=None): + """Get a manifest from repo. The name of the + package is given in fmri. If dest is set, download + the manifest to dest.""" - raise NotImplementedError + raise NotImplementedError - def get_datastream(self, fhash, version, header=None, ccancel=None, pub=None): - """Get a datastream from a repo. The name of the - file is given in fhash.""" + def get_manifests(self, mfstlist, dest, progtrack=None, pub=None): + """Get manifests named in list. The mfstlist argument contains + tuples (fmri, header). This is so that each manifest may have + unique header information. The destination directory is spec- + ified in the dest argument.""" - raise NotImplementedError + raise NotImplementedError - def get_files(self, filelist, dest, progtrack, version, header=None, pub=None): - """Get multiple files from the repo at once. - The files are named by hash and supplied in filelist. - If dest is specified, download to the destination - directory that is given. Progtrack is a ProgressTracker""" + def get_publisherinfo(self, header=None, ccancel=None): + """Get publisher configuration information from the + repository.""" - raise NotImplementedError + raise NotImplementedError - def get_manifest(self, fmri, header=None, ccancel=None, pub=None): - """Get a manifest from repo. The name of the - package is given in fmri. If dest is set, download - the manifest to dest.""" + def get_status(self, header=None, ccancel=None): + """Get status from the repository.""" - raise NotImplementedError + raise NotImplementedError - def get_manifests(self, mfstlist, dest, progtrack=None, pub=None): - """Get manifests named in list. The mfstlist argument contains - tuples (fmri, header). This is so that each manifest may have - unique header information. The destination directory is spec- - ified in the dest argument.""" + def get_url(self): + """Return's the Repo's URL.""" - raise NotImplementedError + raise NotImplementedError - def get_publisherinfo(self, header=None, ccancel=None): - """Get publisher configuration information from the - repository.""" + def get_repouri_key(self): + """Returns the repo's RepositoryURI.""" - raise NotImplementedError + return NotImplementedError - def get_status(self, header=None, ccancel=None): - """Get status from the repository.""" + def get_versions(self, header=None, ccancel=None): + """Query the repo for versions information. + Returns a fileobject.""" - raise NotImplementedError + raise NotImplementedError - def get_url(self): - """Return's the Repo's URL.""" + def publish_add(self, action, header=None, progtrack=None, trans_id=None): + """The publish operation that adds content to a repository. + The action must be populated with a data property. 
+ Callers may supply a header, and should supply a transaction + id in trans_id.""" - raise NotImplementedError + raise NotImplementedError - def get_repouri_key(self): - """Returns the repo's RepositoryURI.""" + def publish_add_file( + self, pth, header=None, trans_id=None, basename=None, progtrack=None + ): + raise NotImplementedError - return NotImplementedError + def publish_add_manifest(self, pth, header=None, trans_id=None): + raise NotImplementedError - def get_versions(self, header=None, ccancel=None): - """Query the repo for versions information. - Returns a fileobject.""" + def publish_abandon(self, header=None, trans_id=None): + """The 'abandon' publication operation, that tells a + Repository to abort the current transaction. The caller + must specify the transaction id in trans_id. Returns + a (publish-state, fmri) tuple.""" - raise NotImplementedError + raise NotImplementedError - def publish_add(self, action, header=None, progtrack=None, - trans_id=None): - """The publish operation that adds content to a repository. - The action must be populated with a data property. - Callers may supply a header, and should supply a transaction - id in trans_id.""" + def publish_close(self, header=None, trans_id=None, add_to_catalog=False): + """The close operation tells the Repository to commit + the transaction identified by trans_id. The caller may + specify add_to_catalog, if needed. This method returns a + (publish-state, fmri) tuple.""" - raise NotImplementedError + raise NotImplementedError - def publish_add_file(self, pth, header=None, trans_id=None, - basename=None, progtrack=None): - raise NotImplementedError + def publish_open(self, header=None, client_release=None, pkg_name=None): + """Begin a publication operation by calling 'open'. + The caller must specify the client's OS release in + client_release, and the package's name in pkg_name. + Returns a transaction-ID.""" - def publish_add_manifest(self, pth, header=None, trans_id=None): - raise NotImplementedError + raise NotImplementedError - def publish_abandon(self, header=None, trans_id=None): - """The 'abandon' publication operation, that tells a - Repository to abort the current transaction. The caller - must specify the transaction id in trans_id. Returns - a (publish-state, fmri) tuple.""" + def publish_rebuild(self, header=None, pub=None): + """Attempt to rebuild the package data and search data in the + repository.""" - raise NotImplementedError + raise NotImplementedError - def publish_close(self, header=None, trans_id=None, - add_to_catalog=False): - """The close operation tells the Repository to commit - the transaction identified by trans_id. The caller may - specify add_to_catalog, if needed. This method returns a - (publish-state, fmri) tuple.""" + def publish_rebuild_indexes(self, header=None, pub=None): + """Attempt to rebuild the search data in the repository.""" - raise NotImplementedError + raise NotImplementedError - def publish_open(self, header=None, client_release=None, pkg_name=None): - """Begin a publication operation by calling 'open'. - The caller must specify the client's OS release in - client_release, and the package's name in pkg_name. 
- Returns a transaction-ID.""" + def publish_rebuild_packages(self, header=None, pub=None): + """Attempt to rebuild the package data in the repository.""" - raise NotImplementedError + raise NotImplementedError - def publish_rebuild(self, header=None, pub=None): - """Attempt to rebuild the package data and search data in the - repository.""" + def publish_refresh(self, header=None, pub=None): + """Attempt to refresh the package data and search data in the + repository.""" - raise NotImplementedError + raise NotImplementedError - def publish_rebuild_indexes(self, header=None, pub=None): - """Attempt to rebuild the search data in the repository.""" + def publish_refresh_indexes(self, header=None, pub=None): + """Attempt to refresh the search data in the repository.""" - raise NotImplementedError + raise NotImplementedError - def publish_rebuild_packages(self, header=None, pub=None): - """Attempt to rebuild the package data in the repository.""" + def publish_refresh_packages(self, header=None, pub=None): + """Attempt to refresh the package data in the repository.""" - raise NotImplementedError + raise NotImplementedError - def publish_refresh(self, header=None, pub=None): - """Attempt to refresh the package data and search data in the - repository.""" + def touch_manifest(self, fmri, header=None, ccancel=None, pub=None): + """Send data about operation intent without actually + downloading a manifest.""" - raise NotImplementedError + raise NotImplementedError - def publish_refresh_indexes(self, header=None, pub=None): - """Attempt to refresh the search data in the repository.""" + def get_compressed_attrs( + self, fhash, header=None, pub=None, trans_id=None, hashes=True + ): + """Given a fhash, returns a tuple of (csize, chashes) where + 'csize' is the size of the file in the repository and 'chashes' + is a dictionary containing any hashes of the compressed data + known by the repository. If the repository cannot provide the + hash information or 'hashes' is False, chashes will be an empty + dictionary. If the repository does not have the file, a tuple + of (None, None) will be returned instead.""" - raise NotImplementedError + raise NotImplementedError - def publish_refresh_packages(self, header=None, pub=None): - """Attempt to refresh the package data in the repository.""" + def build_refetch_header(self, header): + """Based on existing header contents, build a header that + should be used for a subsequent retry when fetching content + from the repository.""" + + raise NotImplementedError + + @staticmethod + def _annotate_exceptions(errors, mapping=None): + """Walk a list of transport errors, examine the + url, and add a field that names the request. This request + information is derived from the URL.""" + + for e in errors: + if not e.url: + # Error may have been raised before request path + # was determined; nothing to annotate. + continue + + if not mapping: + # Request is basename of path portion of URI. + e.request = os.path.basename(urlsplit(e.url)[2]) + continue + + # If caller specified a mapping object, use that + # instead of trying to deduce the request's name. 
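# An illustrative, minimal sketch of how a request name is deduced when no
# mapping is supplied: it is the basename of the URL's path component. The
# URL below is a made-up example; Python 3's urllib.parse stands in for the
# six.moves wrapper imported by this module.
import os.path
from urllib.parse import urlsplit

url = "http://pkg.example.org/repo/file/1/0a1b2c3d"
request = os.path.basename(urlsplit(url)[2])  # [2] is the path component
# request == "0a1b2c3d"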
+ if e.url not in mapping: + raise tx.TransportOperationError( + "No mapping found for URL {0}".format(e.url) + ) - raise NotImplementedError + e.request = mapping[e.url] - def touch_manifest(self, fmri, header=None, ccancel=None, pub=None): - """Send data about operation intent without actually - downloading a manifest.""" + return errors - raise NotImplementedError + @staticmethod + def _parse_html_error(content): + """Parse a html document that contains error information. + Return the html as a plain text string.""" - def get_compressed_attrs(self, fhash, header=None, pub=None, - trans_id=None, hashes=True): - """Given a fhash, returns a tuple of (csize, chashes) where - 'csize' is the size of the file in the repository and 'chashes' - is a dictionary containing any hashes of the compressed data - known by the repository. If the repository cannot provide the - hash information or 'hashes' is False, chashes will be an empty - dictionary. If the repository does not have the file, a tuple - of (None, None) will be returned instead.""" + msg = None + if not content: + return msg - raise NotImplementedError + from xml.dom.minidom import Document, parse - def build_refetch_header(self, header): - """Based on existing header contents, build a header that - should be used for a subsequent retry when fetching content - from the repository.""" + dom = parse(cStringIO(content)) + msg = "" - raise NotImplementedError + paragraphs = [] + if not isinstance(dom, Document): + # Assume the output was the message. + msg = content + else: + paragraphs = dom.getElementsByTagName("p") - @staticmethod - def _annotate_exceptions(errors, mapping=None): - """Walk a list of transport errors, examine the - url, and add a field that names the request. This request - information is derived from the URL.""" + # XXX this is specific to the depot server's current + # error output style. + for p in paragraphs: + for c in p.childNodes: + if c.nodeType == c.TEXT_NODE: + value = c.nodeValue + if value is not None: + msg += "\n{0}".format(value) - for e in errors: - if not e.url: - # Error may have been raised before request path - # was determined; nothing to annotate. - continue + return msg - if not mapping: - # Request is basename of path portion of URI. - e.request = os.path.basename(urlsplit( - e.url)[2]) - continue + @staticmethod + def _url_to_request(urllist, mapping=None): + """Take a list of urls and remove the protocol information, + leaving just the information about the request.""" - # If caller specified a mapping object, use that - # instead of trying to deduce the request's name. - if e.url not in mapping: - raise tx.TransportOperationError( - "No mapping found for URL {0}".format( - e.url)) + reqlist = [] - e.request = mapping[e.url] + for u in urllist: + if not mapping: + utup = urlsplit(u) + req = utup[2] + req = os.path.basename(req) + reqlist.append(req) + continue - return errors + if u not in mapping: + raise tx.TransportOperationError( + "No mapping found for URL {0}".format(u) + ) - @staticmethod - def _parse_html_error(content): - """Parse a html document that contains error information. - Return the html as a plain text string.""" + req = mapping[u] + reqlist.append(req) - msg = None - if not content: - return msg + return reqlist - from xml.dom.minidom import Document, parse - dom = parse(cStringIO(content)) - msg = "" + @staticmethod + def _analyze_server_error(error_header): + """Decode the X-Ipkg-Error header which is appended by the + module doing entitlement checks on the server side. 
Let the user + know why they can't access the repository.""" - paragraphs = [] - if not isinstance(dom, Document): - # Assume the output was the message. - msg = content - else: - paragraphs = dom.getElementsByTagName("p") + ENTITLEMENT_ERROR = "ENT" + LICENSE_ERROR = "LIC" + SERVER_ERROR = "SVR" + MAINTENANCE = "MNT" - # XXX this is specific to the depot server's current - # error output style. - for p in paragraphs: - for c in p.childNodes: - if c.nodeType == c.TEXT_NODE: - value = c.nodeValue - if value is not None: - msg += ("\n{0}".format(value)) - - return msg - - @staticmethod - def _url_to_request(urllist, mapping=None): - """Take a list of urls and remove the protocol information, - leaving just the information about the request.""" - - reqlist = [] - - for u in urllist: - - if not mapping: - utup = urlsplit(u) - req = utup[2] - req = os.path.basename(req) - reqlist.append(req) - continue - - if u not in mapping: - raise tx.TransportOperationError( - "No mapping found for URL {0}".format(u)) - - req = mapping[u] - reqlist.append(req) - - return reqlist - - @staticmethod - def _analyze_server_error(error_header): - """ Decode the X-Ipkg-Error header which is appended by the - module doing entitlement checks on the server side. Let the user - know why they can't access the repository. """ - - ENTITLEMENT_ERROR = "ENT" - LICENSE_ERROR = "LIC" - SERVER_ERROR = "SVR" - MAINTENANCE = "MNT" - - entitlement_err_msg = N_(""" + entitlement_err_msg = N_( + """ This account is not entitled to access this repository. Ensure that the correct certificate is being used and that the support contract for the product being accessed is still valid. -""") +""" + ) - license_err_msg = N_(""" + license_err_msg = N_( + """ The license agreement required to access this repository has not been accepted yet or the license agreement for the product has changed. Please go to https://pkg-register.oracle.com and accept the license for the product you are trying to access. -""") +""" + ) - server_err_msg = N_(""" + server_err_msg = N_( + """ Repository access is currently unavailable due to service issues. Please retry later or contact your customer service representative. -""") +""" + ) - maintenance_msg = N_(""" + maintenance_msg = N_( + """ Repository access rights can currently not be verified due to server maintenance. Please retry later. -""") - msg = "" +""" + ) + msg = "" - # multiple errors possible (e.g. license and entitlement not ok) - error_codes = error_header.split(",") + # multiple errors possible (e.g. license and entitlement not ok) + error_codes = error_header.split(",") - for e in error_codes: - code = e.strip().upper() + for e in error_codes: + code = e.strip().upper() - if code == ENTITLEMENT_ERROR: - msg += _(entitlement_err_msg) - elif code == LICENSE_ERROR: - msg += _(license_err_msg) - elif code == SERVER_ERROR: - msg += _(server_err_msg) - elif code == MAINTENANCE: - msg += _(maintenance_msg) + if code == ENTITLEMENT_ERROR: + msg += _(entitlement_err_msg) + elif code == LICENSE_ERROR: + msg += _(license_err_msg) + elif code == SERVER_ERROR: + msg += _(server_err_msg) + elif code == MAINTENANCE: + msg += _(maintenance_msg) - if msg == "": - return None + if msg == "": + return None - return msg + return msg class HTTPRepo(TransportRepo): - - def __init__(self, repostats, repouri, engine): - """Create a http repo. Repostats is a RepoStats object. - Repouri is a TransportRepoURI object. Engine is a transport - engine object. 
- - The convenience function new_repo() can be used to create - the correct repo.""" - self._url = repostats.url - self._repouri = repouri - self._engine = engine - self._verdata = None - - def __str__(self): - return "HTTPRepo url: {0} repouri: {1}".format(self._url, - self._repouri) - - def _add_file_url(self, url, filepath=None, progclass=None, - progtrack=None, header=None, compress=False): - self._engine.add_url(url, filepath=filepath, - progclass=progclass, progtrack=progtrack, repourl=self._url, - header=header, compressible=compress, - runtime_proxy=self._repouri.runtime_proxy, - proxy=self._repouri.proxy) - - def _fetch_url(self, url, header=None, compress=False, ccancel=None, - failonerror=True, system=False): - return self._engine.get_url(url, header, repourl=self._url, - compressible=compress, ccancel=ccancel, - failonerror=failonerror, - runtime_proxy=self._repouri.runtime_proxy, - proxy=self._repouri.proxy, system=system) - - def _fetch_url_header(self, url, header=None, ccancel=None, - failonerror=True): - return self._engine.get_url_header(url, header, - repourl=self._url, ccancel=ccancel, - failonerror=failonerror, - runtime_proxy=self._repouri.runtime_proxy, - proxy=self._repouri.proxy) - - def _post_url(self, url, data=None, header=None, ccancel=None, - data_fobj=None, data_fp=None, failonerror=True, progclass=None, - progtrack=None): - return self._engine.send_data(url, data=data, header=header, - repourl=self._url, ccancel=ccancel, - data_fobj=data_fobj, data_fp=data_fp, - failonerror=failonerror, progclass=progclass, - progtrack=progtrack, - runtime_proxy=self._repouri.runtime_proxy, - proxy=self._repouri.proxy) - - def __check_response_body(self, fobj): - """Parse the response body found accessible using the provided - filestream object and raise an exception if appropriate.""" - - try: - fobj.free_buffer = False - fobj.read() - except tx.TransportProtoError as e: - if e.code == http_client.BAD_REQUEST: - exc_type, exc_value, exc_tb = sys.exc_info() - try: - e.details = self._parse_html_error( - fobj.read()) - # six.reraise requires the first argument - # callable if the second argument is None. - # Also the traceback is automatically attached, - # in Python 3, so we can simply raise it. - except: - # If parse fails, raise original - # exception. - if six.PY2: - six.reraise(exc_value, None, - exc_tb) - else: - raise exc_value - raise - finally: - fobj.close() - - def add_version_data(self, verdict): - """Cache the information about what versions a repository - supports.""" - - self._verdata = verdict - - def __get_request_url(self, methodstr, query=None, pub=None): - """Generate the request URL for the given method and - publisher. - """ - - base = self._repouri.uri - - # Only append the publisher prefix if the publisher of the - # request is known, not already part of the URI, if this isn't - # an open operation, and if the repository supports version 1 - # of the publisher operation. The prefix shouldn't be appended - # for open because the publisher may not yet be known to the - # repository, and not in other cases because the repository - # doesn't support it. - pub_prefix = getattr(pub, "prefix", None) - if pub_prefix and not methodstr.startswith("open/") and \ - not base.endswith("/{0}/".format(pub_prefix)) and \ - self.supports_version("publisher", [1]) > -1: - # Append the publisher prefix to the repository URL. 
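# An illustrative, minimal sketch of the URL construction performed by
# __get_request_url(): append the publisher prefix, then the method string,
# then optionally replace the query component. Host, publisher, and query
# names are made-up examples; Python 3's urllib.parse stands in for the
# six.moves wrapper used by this module.
from urllib.parse import urljoin, urlencode, urlparse, urlunparse

base = "http://pkg.example.org/repo/"
base = urljoin(base, "solaris") + "/"      # publisher prefix
uri = urljoin(base, "search/1/")           # method string
parts = list(urlparse(uri))
parts[4] = urlencode({"q": "example"})     # replace the query component
# urlunparse(parts) ->
# "http://pkg.example.org/repo/solaris/search/1/?q=example"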
- base = urljoin(base, pub_prefix) + "/" - - uri = urljoin(base, methodstr) - if not query: - return uri - - # If a set of query data was provided, then decompose the URI - # into its component parts and replace the query portion with - # the encoded version of the new query data. - components = list(urlparse(uri)) - components[4] = urlencode(query) - return urlunparse(components) - - def do_search(self, data, header=None, ccancel=None, pub=None): - """Perform a remote search against origin repos.""" - - requesturl = self.__get_request_url("search/1/", pub=pub) - if len(data) > 1: - # Post and retrieve. - request_data = urlencode( - [(i, str(q)) for i, q in enumerate(data)]) - return self._post_url(requesturl, request_data, - header, ccancel=ccancel) - - # Retrieval only. - requesturl = urljoin(requesturl, quote( - str(data[0]), safe='')) - return self._fetch_url(requesturl, header, ccancel=ccancel) - - def get_catalog(self, ts=None, header=None, ccancel=None, pub=None): - """Get the catalog from the repo. If ts is defined, - request only changes newer than timestamp ts.""" - - requesturl = self.__get_request_url("catalog/0/", pub=pub) - if ts: - if not header: - header = {"If-Modified-Since": ts} - else: - header["If-Modified-Since"] = ts - - return self._fetch_url(requesturl, header, compress=True, - ccancel=ccancel) - - def get_catalog1(self, filelist, destloc, header=None, ts=None, - progtrack=None, pub=None, revalidate=False, redownload=False): - """Get the files that make up the catalog components - that are listed in 'filelist'. Download the files to - the directory specified in 'destloc'. The caller - may optionally specify a dictionary with header - elements in 'header'. If a conditional get is - to be performed, 'ts' should contain a floating point - value of seconds since the epoch. - - If 'redownload' or 'revalidate' is set, cache control - headers are appended to the request. Re-download - uses http's no-cache header, while revalidate uses - max-age=0.""" - - baseurl = self.__get_request_url("catalog/1/", pub=pub) - urllist = [] - progclass = None - headers = {} - - if redownload and revalidate: - raise ValueError("Either revalidate or redownload" - " may be used, but not both.") - if ts: - # Convert date to RFC 1123 compliant string - tsstr = formatdate(timeval=ts, localtime=False, - usegmt=True) - headers["If-Modified-Since"] = tsstr - if revalidate: - headers["Cache-Control"] = "max-age=0" - if redownload: - headers["Cache-Control"] = "no-cache" - headers["Pragma"] = "no-cache" - if header: - headers.update(header) - if progtrack: - progclass = CatalogProgress - - for f in filelist: - url = urljoin(baseurl, f) - urllist.append(url) - fn = os.path.join(destloc, f) - self._add_file_url(url, filepath=fn, header=headers, - compress=True, progtrack=progtrack, - progclass=progclass) - - try: - while self._engine.pending: - self._engine.run() - except tx.ExcessiveTransientFailure as e: - # Attach a list of failed and successful - # requests to this exception. - errors, success = self._engine.check_status(urllist, - True) - - errors = self._annotate_exceptions(errors) - success = self._url_to_request(success) - e.failures = errors - e.success = success - - # Reset the engine before propagating exception. - self._engine.reset() - raise - - errors = self._engine.check_status(urllist) - - # Transient errors are part of standard control flow. - # The repo's caller will look at these and decide whether - # to throw them or not. 
Permanent failures are raised - # by the transport engine as soon as they occur. - # - # This adds an attribute that describes the request to the - # exception, if we were able to figure it out. - - return self._annotate_exceptions(errors) - - def get_datastream(self, fhash, version, header=None, ccancel=None, - pub=None): - """Get a datastream from a repo. The name of the - file is given in fhash.""" - - # The only versions this operation is compatible with. - assert version == 0 or version == 1 - - baseurl = self.__get_request_url("file/{0}/".format(version), - pub=pub) - requesturl = urljoin(baseurl, fhash) - return self._fetch_url(requesturl, header, ccancel=ccancel) - - def get_publisherinfo(self, header=None, ccancel=None): - """Get publisher information from the repository.""" - - requesturl = self.__get_request_url("publisher/0/") - return self._fetch_url(requesturl, header, ccancel=ccancel) - - def get_syspub_info(self, header=None, ccancel=None): - """Get configuration from the system depot.""" - - requesturl = self.__get_request_url("syspub/0/") - # We set 'system=True' to cause the transport to override any - # $http_proxy environment variables. Syspub origins/mirrors - # that are normally proxied through the system-repository will - # have a proxy attached to their RepositoryURI, and the - # corresponding TransportRepoURI runtime_proxy value will be set - # to the same value, so we don't need to pass the 'system' - # kwarg in those cases. - return self._fetch_url(requesturl, header, ccancel=ccancel, - system=True) - - def get_status(self, header=None, ccancel=None): - """Get status/0 information from the repository.""" - - requesturl = self.__get_request_url("status/0") - return self._fetch_url(requesturl, header, ccancel=ccancel) - - def get_manifest(self, fmri, header=None, ccancel=None, pub=None): - """Get a package manifest from repo. The FMRI of the - package is given in fmri.""" - - mfst = fmri.get_url_path() - baseurl = self.__get_request_url("manifest/0/", pub=pub) - requesturl = urljoin(baseurl, mfst) - - return self._fetch_url(requesturl, header, compress=True, - ccancel=ccancel) - - def get_manifests(self, mfstlist, dest, progtrack=None, pub=None): - """Get manifests named in list. The mfstlist argument contains - tuples (fmri, header). This is so that each manifest may have - unique header information. The destination directory is spec- - ified in the dest argument.""" - - baseurl = self.__get_request_url("manifest/0/", pub=pub) - urlmapping = {} - progclass = None - - if progtrack: - progclass = ManifestProgress - - for fmri, h in mfstlist: - f = fmri.get_url_path() - url = urljoin(baseurl, f) - urlmapping[url] = fmri - fn = os.path.join(dest, f) - self._add_file_url(url, filepath=fn, header=h, - compress=True, progtrack=progtrack, - progclass=progclass) - - # Compute urllist from keys in mapping - urllist = urlmapping.keys() - + def __init__(self, repostats, repouri, engine): + """Create a http repo. Repostats is a RepoStats object. + Repouri is a TransportRepoURI object. Engine is a transport + engine object. 
+ + The convenience function new_repo() can be used to create + the correct repo.""" + self._url = repostats.url + self._repouri = repouri + self._engine = engine + self._verdata = None + + def __str__(self): + return "HTTPRepo url: {0} repouri: {1}".format(self._url, self._repouri) + + def _add_file_url( + self, + url, + filepath=None, + progclass=None, + progtrack=None, + header=None, + compress=False, + ): + self._engine.add_url( + url, + filepath=filepath, + progclass=progclass, + progtrack=progtrack, + repourl=self._url, + header=header, + compressible=compress, + runtime_proxy=self._repouri.runtime_proxy, + proxy=self._repouri.proxy, + ) + + def _fetch_url( + self, + url, + header=None, + compress=False, + ccancel=None, + failonerror=True, + system=False, + ): + return self._engine.get_url( + url, + header, + repourl=self._url, + compressible=compress, + ccancel=ccancel, + failonerror=failonerror, + runtime_proxy=self._repouri.runtime_proxy, + proxy=self._repouri.proxy, + system=system, + ) + + def _fetch_url_header( + self, url, header=None, ccancel=None, failonerror=True + ): + return self._engine.get_url_header( + url, + header, + repourl=self._url, + ccancel=ccancel, + failonerror=failonerror, + runtime_proxy=self._repouri.runtime_proxy, + proxy=self._repouri.proxy, + ) + + def _post_url( + self, + url, + data=None, + header=None, + ccancel=None, + data_fobj=None, + data_fp=None, + failonerror=True, + progclass=None, + progtrack=None, + ): + return self._engine.send_data( + url, + data=data, + header=header, + repourl=self._url, + ccancel=ccancel, + data_fobj=data_fobj, + data_fp=data_fp, + failonerror=failonerror, + progclass=progclass, + progtrack=progtrack, + runtime_proxy=self._repouri.runtime_proxy, + proxy=self._repouri.proxy, + ) + + def __check_response_body(self, fobj): + """Parse the response body found accessible using the provided + filestream object and raise an exception if appropriate.""" + + try: + fobj.free_buffer = False + fobj.read() + except tx.TransportProtoError as e: + if e.code == http_client.BAD_REQUEST: + exc_type, exc_value, exc_tb = sys.exc_info() try: - while self._engine.pending: - self._engine.run() - except tx.ExcessiveTransientFailure as e: - # Attach a list of failed and successful - # requests to this exception. - errors, success = self._engine.check_status(urllist, - True) - - errors = self._annotate_exceptions(errors, urlmapping) - success = self._url_to_request(success, urlmapping) - e.failures = errors - e.success = success - - # Reset the engine before propagating exception. - self._engine.reset() - raise - - errors = self._engine.check_status(urllist) - - # Transient errors are part of standard control flow. - # The repo's caller will look at these and decide whether - # to throw them or not. Permanent failures are raised - # by the transport engine as soon as they occur. - # - # This adds an attribute that describes the request to the - # exception, if we were able to figure it out. - - return self._annotate_exceptions(errors, urlmapping) - - def get_files(self, filelist, dest, progtrack, version, header=None, pub=None): - """Get multiple files from the repo at once. - The files are named by hash and supplied in filelist. - If dest is specified, download to the destination - directory that is given. 
If progtrack is not None, - it contains a ProgressTracker object for the - downloads.""" - - baseurl = self.__get_request_url("file/{0}/".format(version), - pub=pub) - urllist = [] - progclass = None - - if progtrack: - progclass = FileProgress - - for f in filelist: - url = urljoin(baseurl, f) - urllist.append(url) - fn = os.path.join(dest, f) - self._add_file_url(url, filepath=fn, - progclass=progclass, progtrack=progtrack, - header=header) - - try: - while self._engine.pending: - self._engine.run() - except tx.ExcessiveTransientFailure as e: - # Attach a list of failed and successful - # requests to this exception. - errors, success = self._engine.check_status(urllist, - True) - - errors = self._annotate_exceptions(errors) - success = self._url_to_request(success) - e.failures = errors - e.success = success - - # Reset the engine before propagating exception. - self._engine.reset() - raise - - errors = self._engine.check_status(urllist) - - # Transient errors are part of standard control flow. - # The repo's caller will look at these and decide whether - # to throw them or not. Permanent failures are raised - # by the transport engine as soon as they occur. - # - # This adds an attribute that describes the request to the - # exception, if we were able to figure it out. - - return self._annotate_exceptions(errors) - - def get_url(self): - """Returns the repo's url.""" - - return self._url - - def get_repouri_key(self): - """Returns the repo's TransportRepoURI key, used to uniquely - identify this TransportRepoURI.""" - - return self._repouri.key() - - def get_versions(self, header=None, ccancel=None): - """Query the repo for versions information. - Returns a fileobject. If server returns 401 (Unauthorized) - check for presence of X-IPkg-Error header and decode.""" - - requesturl = self.__get_request_url("versions/0/") - fobj = self._fetch_url(requesturl, header, ccancel=ccancel, - failonerror=False) - - try: - # Bogus request to trigger - # StreamingFileObj.__fill_buffer(), otherwise the - # TransportProtoError won't be raised here. We can't - # use .read() since this will empty the data buffer. - fobj.getheader("octopus", None) - except tx.TransportProtoError as e: - if e.code == http_client.UNAUTHORIZED: - exc_type, exc_value, exc_tb = sys.exc_info() - try: - e.details = self._analyze_server_error( - fobj.getheader("X-IPkg-Error", - None)) - except: - # If analysis fails, raise original - # exception. - if six.PY2: - six.reraise(exc_value, None, - exc_tb) - else: - raise exc_value - raise - return fobj - - def has_version_data(self): - """Returns true if this repo knows its version information.""" - - return self._verdata is not None - - def publish_add(self, action, header=None, progtrack=None, - trans_id=None): - """The publish operation that adds content to a repository. - The action must be populated with a data property. 
- Callers may supply a header, and should supply a transaction - id in trans_id.""" - - attrs = action.attrs - data_fobj = None - data = None - progclass = None - - if progtrack: - progclass = FileProgress - - baseurl = self.__get_request_url("add/0/") - request_str = "{0}/{1}".format(trans_id, action.name) - requesturl = urljoin(baseurl, request_str) - - if action.data: - data_fobj = action.data() - else: - data = "" - - headers = dict( - ("X-IPkg-SetAttr{0}".format(i), "{0}={1}".format(k, - attrs[k])) - for i, k in enumerate(attrs) - ) - - if header: - headers.update(header) - - fobj = self._post_url(requesturl, header=headers, - data_fobj=data_fobj, data=data, failonerror=False, - progclass=progclass, progtrack=progtrack) - self.__check_response_body(fobj) - - def publish_add_file(self, pth, header=None, trans_id=None, - basename=None, progtrack=None): - """The publish operation that adds content to a repository. - Callers may supply a header, and should supply a transaction - id in trans_id.""" - - attrs = {} - progclass = None - - if progtrack: - progclass = FileProgress - - if basename: - attrs["basename"] = basename - - baseurl = self.__get_request_url("file/1/") - requesturl = urljoin(baseurl, trans_id) - - headers = dict( - ("X-IPkg-SetAttr{0}".format(i), "{0}={1}".format(k, attrs[k])) - for i, k in enumerate(attrs) - ) - - if header: - headers.update(header) - - fobj = self._post_url(requesturl, header=headers, data_fp=pth, - progclass=progclass, progtrack=progtrack) - self.__check_response_body(fobj) - - def publish_add_manifest(self, pth, header=None, trans_id=None): - """The publish operation that adds content to a repository. - Callers may supply a header, and should supply a transaction - id in trans_id.""" - - baseurl = self.__get_request_url("manifest/1/") - requesturl = urljoin(baseurl, trans_id) - # Compress the manifest for the HTTPRepo case. - size = int(os.path.getsize(pth)) - with open(pth, "rb") as f: - data = f.read() - basename = os.path.basename(pth) + ".gz" - dirname = os.path.dirname(pth) - pathname = os.path.join(dirname, basename) - compute_compressed_attrs(basename, - data=data, size=size, compress_dir=dirname) - - headers = {} - if header: - headers.update(header) - - fobj = self._post_url(requesturl, header=header, - data_fp=pathname) - self.__check_response_body(fobj) - - def publish_abandon(self, header=None, trans_id=None): - """The 'abandon' publication operation, that tells a - Repository to abort the current transaction. The caller - must specify the transaction id in trans_id. Returns - a (publish-state, fmri) tuple.""" - - baseurl = self.__get_request_url("abandon/0/") - requesturl = urljoin(baseurl, trans_id) - fobj = self._fetch_url(requesturl, header=header, - failonerror=False) + e.details = self._parse_html_error(fobj.read()) + # six.reraise requires the first argument + # callable if the second argument is None. + # Also the traceback is automatically attached, + # in Python 3, so we can simply raise it. + except: + # If parse fails, raise original + # exception. + if six.PY2: + six.reraise(exc_value, None, exc_tb) + else: + raise exc_value + raise + finally: + fobj.close() + + def add_version_data(self, verdict): + """Cache the information about what versions a repository + supports.""" + + self._verdata = verdict + + def __get_request_url(self, methodstr, query=None, pub=None): + """Generate the request URL for the given method and + publisher. 
+ """ + base = self._repouri.uri + + # Only append the publisher prefix if the publisher of the + # request is known, not already part of the URI, if this isn't + # an open operation, and if the repository supports version 1 + # of the publisher operation. The prefix shouldn't be appended + # for open because the publisher may not yet be known to the + # repository, and not in other cases because the repository + # doesn't support it. + pub_prefix = getattr(pub, "prefix", None) + if ( + pub_prefix + and not methodstr.startswith("open/") + and not base.endswith("/{0}/".format(pub_prefix)) + and self.supports_version("publisher", [1]) > -1 + ): + # Append the publisher prefix to the repository URL. + base = urljoin(base, pub_prefix) + "/" + + uri = urljoin(base, methodstr) + if not query: + return uri + + # If a set of query data was provided, then decompose the URI + # into its component parts and replace the query portion with + # the encoded version of the new query data. + components = list(urlparse(uri)) + components[4] = urlencode(query) + return urlunparse(components) + + def do_search(self, data, header=None, ccancel=None, pub=None): + """Perform a remote search against origin repos.""" + + requesturl = self.__get_request_url("search/1/", pub=pub) + if len(data) > 1: + # Post and retrieve. + request_data = urlencode([(i, str(q)) for i, q in enumerate(data)]) + return self._post_url( + requesturl, request_data, header, ccancel=ccancel + ) + + # Retrieval only. + requesturl = urljoin(requesturl, quote(str(data[0]), safe="")) + return self._fetch_url(requesturl, header, ccancel=ccancel) + + def get_catalog(self, ts=None, header=None, ccancel=None, pub=None): + """Get the catalog from the repo. If ts is defined, + request only changes newer than timestamp ts.""" + + requesturl = self.__get_request_url("catalog/0/", pub=pub) + if ts: + if not header: + header = {"If-Modified-Since": ts} + else: + header["If-Modified-Since"] = ts + + return self._fetch_url( + requesturl, header, compress=True, ccancel=ccancel + ) + + def get_catalog1( + self, + filelist, + destloc, + header=None, + ts=None, + progtrack=None, + pub=None, + revalidate=False, + redownload=False, + ): + """Get the files that make up the catalog components + that are listed in 'filelist'. Download the files to + the directory specified in 'destloc'. The caller + may optionally specify a dictionary with header + elements in 'header'. If a conditional get is + to be performed, 'ts' should contain a floating point + value of seconds since the epoch. + + If 'redownload' or 'revalidate' is set, cache control + headers are appended to the request. Re-download + uses http's no-cache header, while revalidate uses + max-age=0.""" + + baseurl = self.__get_request_url("catalog/1/", pub=pub) + urllist = [] + progclass = None + headers = {} + + if redownload and revalidate: + raise ValueError( + "Either revalidate or redownload" " may be used, but not both." 
+ ) + if ts: + # Convert date to RFC 1123 compliant string + tsstr = formatdate(timeval=ts, localtime=False, usegmt=True) + headers["If-Modified-Since"] = tsstr + if revalidate: + headers["Cache-Control"] = "max-age=0" + if redownload: + headers["Cache-Control"] = "no-cache" + headers["Pragma"] = "no-cache" + if header: + headers.update(header) + if progtrack: + progclass = CatalogProgress + + for f in filelist: + url = urljoin(baseurl, f) + urllist.append(url) + fn = os.path.join(destloc, f) + self._add_file_url( + url, + filepath=fn, + header=headers, + compress=True, + progtrack=progtrack, + progclass=progclass, + ) + + try: + while self._engine.pending: + self._engine.run() + except tx.ExcessiveTransientFailure as e: + # Attach a list of failed and successful + # requests to this exception. + errors, success = self._engine.check_status(urllist, True) + + errors = self._annotate_exceptions(errors) + success = self._url_to_request(success) + e.failures = errors + e.success = success + + # Reset the engine before propagating exception. + self._engine.reset() + raise + + errors = self._engine.check_status(urllist) + + # Transient errors are part of standard control flow. + # The repo's caller will look at these and decide whether + # to throw them or not. Permanent failures are raised + # by the transport engine as soon as they occur. + # + # This adds an attribute that describes the request to the + # exception, if we were able to figure it out. + + return self._annotate_exceptions(errors) + + def get_datastream( + self, fhash, version, header=None, ccancel=None, pub=None + ): + """Get a datastream from a repo. The name of the + file is given in fhash.""" + + # The only versions this operation is compatible with. + assert version == 0 or version == 1 + + baseurl = self.__get_request_url("file/{0}/".format(version), pub=pub) + requesturl = urljoin(baseurl, fhash) + return self._fetch_url(requesturl, header, ccancel=ccancel) + + def get_publisherinfo(self, header=None, ccancel=None): + """Get publisher information from the repository.""" + + requesturl = self.__get_request_url("publisher/0/") + return self._fetch_url(requesturl, header, ccancel=ccancel) + + def get_syspub_info(self, header=None, ccancel=None): + """Get configuration from the system depot.""" + + requesturl = self.__get_request_url("syspub/0/") + # We set 'system=True' to cause the transport to override any + # $http_proxy environment variables. Syspub origins/mirrors + # that are normally proxied through the system-repository will + # have a proxy attached to their RepositoryURI, and the + # corresponding TransportRepoURI runtime_proxy value will be set + # to the same value, so we don't need to pass the 'system' + # kwarg in those cases. + return self._fetch_url(requesturl, header, ccancel=ccancel, system=True) + + def get_status(self, header=None, ccancel=None): + """Get status/0 information from the repository.""" + + requesturl = self.__get_request_url("status/0") + return self._fetch_url(requesturl, header, ccancel=ccancel) + + def get_manifest(self, fmri, header=None, ccancel=None, pub=None): + """Get a package manifest from repo. The FMRI of the + package is given in fmri.""" + + mfst = fmri.get_url_path() + baseurl = self.__get_request_url("manifest/0/", pub=pub) + requesturl = urljoin(baseurl, mfst) + + return self._fetch_url( + requesturl, header, compress=True, ccancel=ccancel + ) + + def get_manifests(self, mfstlist, dest, progtrack=None, pub=None): + """Get manifests named in list. 
The mfstlist argument contains + tuples (fmri, header). This is so that each manifest may have + unique header information. The destination directory is spec- + ified in the dest argument.""" + + baseurl = self.__get_request_url("manifest/0/", pub=pub) + urlmapping = {} + progclass = None + + if progtrack: + progclass = ManifestProgress + + for fmri, h in mfstlist: + f = fmri.get_url_path() + url = urljoin(baseurl, f) + urlmapping[url] = fmri + fn = os.path.join(dest, f) + self._add_file_url( + url, + filepath=fn, + header=h, + compress=True, + progtrack=progtrack, + progclass=progclass, + ) + + # Compute urllist from keys in mapping + urllist = urlmapping.keys() + + try: + while self._engine.pending: + self._engine.run() + except tx.ExcessiveTransientFailure as e: + # Attach a list of failed and successful + # requests to this exception. + errors, success = self._engine.check_status(urllist, True) + + errors = self._annotate_exceptions(errors, urlmapping) + success = self._url_to_request(success, urlmapping) + e.failures = errors + e.success = success + + # Reset the engine before propagating exception. + self._engine.reset() + raise + + errors = self._engine.check_status(urllist) + + # Transient errors are part of standard control flow. + # The repo's caller will look at these and decide whether + # to throw them or not. Permanent failures are raised + # by the transport engine as soon as they occur. + # + # This adds an attribute that describes the request to the + # exception, if we were able to figure it out. + + return self._annotate_exceptions(errors, urlmapping) + + def get_files( + self, filelist, dest, progtrack, version, header=None, pub=None + ): + """Get multiple files from the repo at once. + The files are named by hash and supplied in filelist. + If dest is specified, download to the destination + directory that is given. If progtrack is not None, + it contains a ProgressTracker object for the + downloads.""" + + baseurl = self.__get_request_url("file/{0}/".format(version), pub=pub) + urllist = [] + progclass = None + + if progtrack: + progclass = FileProgress + + for f in filelist: + url = urljoin(baseurl, f) + urllist.append(url) + fn = os.path.join(dest, f) + self._add_file_url( + url, + filepath=fn, + progclass=progclass, + progtrack=progtrack, + header=header, + ) + + try: + while self._engine.pending: + self._engine.run() + except tx.ExcessiveTransientFailure as e: + # Attach a list of failed and successful + # requests to this exception. + errors, success = self._engine.check_status(urllist, True) + + errors = self._annotate_exceptions(errors) + success = self._url_to_request(success) + e.failures = errors + e.success = success + + # Reset the engine before propagating exception. + self._engine.reset() + raise + + errors = self._engine.check_status(urllist) + + # Transient errors are part of standard control flow. + # The repo's caller will look at these and decide whether + # to throw them or not. Permanent failures are raised + # by the transport engine as soon as they occur. + # + # This adds an attribute that describes the request to the + # exception, if we were able to figure it out. + + return self._annotate_exceptions(errors) + + def get_url(self): + """Returns the repo's url.""" + + return self._url + + def get_repouri_key(self): + """Returns the repo's TransportRepoURI key, used to uniquely + identify this TransportRepoURI.""" + + return self._repouri.key() + + def get_versions(self, header=None, ccancel=None): + """Query the repo for versions information. 
+ Returns a fileobject. If server returns 401 (Unauthorized) + check for presence of X-IPkg-Error header and decode.""" + + requesturl = self.__get_request_url("versions/0/") + fobj = self._fetch_url( + requesturl, header, ccancel=ccancel, failonerror=False + ) + + try: + # Bogus request to trigger + # StreamingFileObj.__fill_buffer(), otherwise the + # TransportProtoError won't be raised here. We can't + # use .read() since this will empty the data buffer. + fobj.getheader("octopus", None) + except tx.TransportProtoError as e: + if e.code == http_client.UNAUTHORIZED: + exc_type, exc_value, exc_tb = sys.exc_info() try: - fobj.free_buffer = False - fobj.read() - state = fobj.getheader("State", None) - pkgfmri = fobj.getheader("Package-FMRI", None) - except tx.TransportProtoError as e: - if e.code == http_client.BAD_REQUEST: - exc_type, exc_value, exc_tb = sys.exc_info() - try: - e.details = self._parse_html_error( - fobj.read()) - except: - # If parse fails, raise original - # exception. - if six.PY2: - six.reraise(exc_value, None, - exc_tb) - else: - raise exc_value - raise - finally: - fobj.close() - - return state, pkgfmri - - def publish_close(self, header=None, trans_id=None, - add_to_catalog=False): - """The close operation tells the Repository to commit - the transaction identified by trans_id. The caller may - specify add_to_catalog, if needed. This method returns a - (publish-state, fmri) tuple.""" - - headers = {} - if not add_to_catalog: - headers["X-IPkg-Add-To-Catalog"] = 0 - if header: - headers.update(header) - - baseurl = self.__get_request_url("close/0/") - requesturl = urljoin(baseurl, trans_id) - - fobj = self._fetch_url(requesturl, header=headers, - failonerror=False) - + e.details = self._analyze_server_error( + fobj.getheader("X-IPkg-Error", None) + ) + except: + # If analysis fails, raise original + # exception. + if six.PY2: + six.reraise(exc_value, None, exc_tb) + else: + raise exc_value + raise + return fobj + + def has_version_data(self): + """Returns true if this repo knows its version information.""" + + return self._verdata is not None + + def publish_add(self, action, header=None, progtrack=None, trans_id=None): + """The publish operation that adds content to a repository. + The action must be populated with a data property. + Callers may supply a header, and should supply a transaction + id in trans_id.""" + + attrs = action.attrs + data_fobj = None + data = None + progclass = None + + if progtrack: + progclass = FileProgress + + baseurl = self.__get_request_url("add/0/") + request_str = "{0}/{1}".format(trans_id, action.name) + requesturl = urljoin(baseurl, request_str) + + if action.data: + data_fobj = action.data() + else: + data = "" + + headers = dict( + ("X-IPkg-SetAttr{0}".format(i), "{0}={1}".format(k, attrs[k])) + for i, k in enumerate(attrs) + ) + + if header: + headers.update(header) + + fobj = self._post_url( + requesturl, + header=headers, + data_fobj=data_fobj, + data=data, + failonerror=False, + progclass=progclass, + progtrack=progtrack, + ) + self.__check_response_body(fobj) + + def publish_add_file( + self, pth, header=None, trans_id=None, basename=None, progtrack=None + ): + """The publish operation that adds content to a repository. 
+ Callers may supply a header, and should supply a transaction + id in trans_id.""" + + attrs = {} + progclass = None + + if progtrack: + progclass = FileProgress + + if basename: + attrs["basename"] = basename + + baseurl = self.__get_request_url("file/1/") + requesturl = urljoin(baseurl, trans_id) + + headers = dict( + ("X-IPkg-SetAttr{0}".format(i), "{0}={1}".format(k, attrs[k])) + for i, k in enumerate(attrs) + ) + + if header: + headers.update(header) + + fobj = self._post_url( + requesturl, + header=headers, + data_fp=pth, + progclass=progclass, + progtrack=progtrack, + ) + self.__check_response_body(fobj) + + def publish_add_manifest(self, pth, header=None, trans_id=None): + """The publish operation that adds content to a repository. + Callers may supply a header, and should supply a transaction + id in trans_id.""" + + baseurl = self.__get_request_url("manifest/1/") + requesturl = urljoin(baseurl, trans_id) + # Compress the manifest for the HTTPRepo case. + size = int(os.path.getsize(pth)) + with open(pth, "rb") as f: + data = f.read() + basename = os.path.basename(pth) + ".gz" + dirname = os.path.dirname(pth) + pathname = os.path.join(dirname, basename) + compute_compressed_attrs( + basename, data=data, size=size, compress_dir=dirname + ) + + headers = {} + if header: + headers.update(header) + + fobj = self._post_url(requesturl, header=header, data_fp=pathname) + self.__check_response_body(fobj) + + def publish_abandon(self, header=None, trans_id=None): + """The 'abandon' publication operation, that tells a + Repository to abort the current transaction. The caller + must specify the transaction id in trans_id. Returns + a (publish-state, fmri) tuple.""" + + baseurl = self.__get_request_url("abandon/0/") + requesturl = urljoin(baseurl, trans_id) + fobj = self._fetch_url(requesturl, header=header, failonerror=False) + + try: + fobj.free_buffer = False + fobj.read() + state = fobj.getheader("State", None) + pkgfmri = fobj.getheader("Package-FMRI", None) + except tx.TransportProtoError as e: + if e.code == http_client.BAD_REQUEST: + exc_type, exc_value, exc_tb = sys.exc_info() try: - fobj.free_buffer = False - fobj.read() - state = fobj.getheader("State", None) - pkgfmri = fobj.getheader("Package-FMRI", None) - except tx.TransportProtoError as e: - if e.code == http_client.BAD_REQUEST: - exc_type, exc_value, exc_tb = sys.exc_info() - try: - e.details = self._parse_html_error( - fobj.read()) - except: - # If parse fails, raise original - # exception. - if six.PY2: - six.reraise(exc_value, None, - exc_tb) - else: - raise exc_value - - raise - finally: - fobj.close() - - return state, pkgfmri - - def publish_open(self, header=None, client_release=None, pkg_name=None): - """Begin a publication operation by calling 'open'. - The caller must specify the client's OS release in - client_release, and the package's name in pkg_name. - Returns a transaction-ID.""" - - baseurl = self.__get_request_url("open/0/") - return self.__start_trans(baseurl, header, client_release, - pkg_name) - - def __start_trans(self, baseurl, header, client_release, pkg_name): - """Start a publication transaction.""" - - request_str = quote(pkg_name, "") - requesturl = urljoin(baseurl, request_str) - - headers = {"Client-Release": client_release} - if header: - headers.update(header) - - fobj = self._fetch_url(requesturl, header=headers, - failonerror=False) - + e.details = self._parse_html_error(fobj.read()) + except: + # If parse fails, raise original + # exception. 
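# --- Editor's note: illustrative sketch, not part of the patch ------------
# publish_add() and publish_add_file() above turn their attrs mappings into
# "X-IPkg-SetAttr<N>" request headers via the dict comprehension that Black
# has now wrapped across several lines.  A standalone sketch of that
# transformation, using a made-up attrs dict purely for illustration:
attrs = {"basename": "example.file", "pkg.size": "1234"}   # hypothetical
headers = dict(
    ("X-IPkg-SetAttr{0}".format(i), "{0}={1}".format(k, attrs[k]))
    for i, k in enumerate(attrs)
)
# With Python 3 insertion-ordered dicts this yields:
#   {"X-IPkg-SetAttr0": "basename=example.file",
#    "X-IPkg-SetAttr1": "pkg.size=1234"}
# ---------------------------------------------------------------------------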
+ if six.PY2: + six.reraise(exc_value, None, exc_tb) + else: + raise exc_value + raise + finally: + fobj.close() + + return state, pkgfmri + + def publish_close(self, header=None, trans_id=None, add_to_catalog=False): + """The close operation tells the Repository to commit + the transaction identified by trans_id. The caller may + specify add_to_catalog, if needed. This method returns a + (publish-state, fmri) tuple.""" + + headers = {} + if not add_to_catalog: + headers["X-IPkg-Add-To-Catalog"] = 0 + if header: + headers.update(header) + + baseurl = self.__get_request_url("close/0/") + requesturl = urljoin(baseurl, trans_id) + + fobj = self._fetch_url(requesturl, header=headers, failonerror=False) + + try: + fobj.free_buffer = False + fobj.read() + state = fobj.getheader("State", None) + pkgfmri = fobj.getheader("Package-FMRI", None) + except tx.TransportProtoError as e: + if e.code == http_client.BAD_REQUEST: + exc_type, exc_value, exc_tb = sys.exc_info() try: - fobj.free_buffer = False - fobj.read() - trans_id = fobj.getheader("Transaction-ID", None) - except tx.TransportProtoError as e: - if e.code == http_client.BAD_REQUEST: - exc_type, exc_value, exc_tb = sys.exc_info() - try: - e.details = self._parse_html_error( - fobj.read()) - except: - # If parse fails, raise original - # exception. - if six.PY2: - six.reraise(exc_value, None, - exc_tb) - else: - raise exc_value - raise - finally: - fobj.close() - - return trans_id - - def publish_append(self, header=None, client_release=None, - pkg_name=None): - """Begin a publication operation by calling 'append'. - The caller must specify the client's OS release in - client_release, and the package's name in pkg_name. - Returns a transaction-ID.""" - - baseurl = self.__get_request_url("append/0/") - return self.__start_trans(baseurl, header, client_release, - pkg_name) - - def publish_rebuild(self, header=None, pub=None): - """Attempt to rebuild the package data and search data in the - repository.""" - - requesturl = self.__get_request_url("admin/0", query={ - "cmd": "rebuild" }, pub=pub) - fobj = self._fetch_url(requesturl, header=header, - failonerror=False) - self.__check_response_body(fobj) - - def publish_rebuild_indexes(self, header=None, pub=None): - """Attempt to rebuild the search data in the repository.""" - - requesturl = self.__get_request_url("admin/0", query={ - "cmd": "rebuild-indexes" }, pub=pub) - fobj = self._fetch_url(requesturl, header=header, - failonerror=False) - self.__check_response_body(fobj) - - def publish_rebuild_packages(self, header=None, pub=None): - """Attempt to rebuild the package data in the repository.""" - - requesturl = self.__get_request_url("admin/0", query={ - "cmd": "rebuild-packages" }, pub=pub) - fobj = self._fetch_url(requesturl, header=header, - failonerror=False) - self.__check_response_body(fobj) - - def publish_refresh(self, header=None, pub=None): - """Attempt to refresh the package data and search data in the - repository.""" - - requesturl = self.__get_request_url("admin/0", query={ - "cmd": "refresh" }, pub=pub) - fobj = self._fetch_url(requesturl, header=header, - failonerror=False) - self.__check_response_body(fobj) - - def publish_refresh_indexes(self, header=None, pub=None): - """Attempt to refresh the search data in the repository.""" - - if self.supports_version("admin", [0]) > -1: - requesturl = self.__get_request_url("admin/0", query={ - "cmd": "refresh-indexes" }, pub=pub) - else: - requesturl = self.__get_request_url("index/0/refresh") - - fobj = self._fetch_url(requesturl, 
header=header, - failonerror=False) - self.__check_response_body(fobj) - - def publish_refresh_packages(self, header=None, pub=None): - """Attempt to refresh the package data in the repository.""" - - requesturl = self.__get_request_url("admin/0", query={ - "cmd": "refresh-packages" }, pub=pub) - fobj = self._fetch_url(requesturl, header=header, - failonerror=False) - self.__check_response_body(fobj) - - def supports_version(self, op, verlist): - """Returns version-id of highest supported version. - If the version is not supported, or no data is available, - -1 is returned instead.""" - - if not self.has_version_data() or op not in self._verdata: - return -1 - - # This code assumes that both the verlist and verdata - # are sorted in reverse order. This behavior is currently - # implemented in the transport code. - - for v in verlist: - if v in self._verdata[op]: - return v - return -1 - - def touch_manifest(self, mfst, header=None, ccancel=None, pub=None): - """Invoke HTTP HEAD to send manifest intent data.""" - - baseurl = self.__get_request_url("manifest/0/", pub=pub) - requesturl = urljoin(baseurl, mfst) - - resp = self._fetch_url_header(requesturl, header, - ccancel=ccancel) - - # response is empty, or should be. - resp.read() - - return True - - def get_compressed_attrs(self, fhash, header=None, pub=None, - trans_id=None, hashes=True): - """Given a fhash, returns a tuple of (csize, chashes) where - 'csize' is the size of the file in the repository and 'chashes' - is a dictionary containing any hashes of the compressed data - known by the repository. If the repository cannot provide the - hash information or 'hashes' is False, chashes will be an empty - dictionary. If the repository does not have the file, a tuple - of (None, None) will be returned instead.""" - - # If the publisher's prefix isn't contained in trans_id, - # assume the server doesn't have the file. - pfx = getattr(pub, "prefix", None) - if (pfx and trans_id and - quote("pkg://{0}/".format(pfx), safe='') not in trans_id): - return (None, None) - - # If caller requests hashes and server supports providing them - # (v2 of file operation), then attempt to retrieve size and - # hashes. Otherwise, fallback to the v0 file operation which - # only returns size (so is faster). - if hashes and self.supports_version("file", [2]) > -1: - version = 2 - else: - version = 0 - - baseurl = self.__get_request_url("file/{0}/".format(version), - pub=pub) - requesturl = urljoin(baseurl, fhash) - + e.details = self._parse_html_error(fobj.read()) + except: + # If parse fails, raise original + # exception. + if six.PY2: + six.reraise(exc_value, None, exc_tb) + else: + raise exc_value + + raise + finally: + fobj.close() + + return state, pkgfmri + + def publish_open(self, header=None, client_release=None, pkg_name=None): + """Begin a publication operation by calling 'open'. + The caller must specify the client's OS release in + client_release, and the package's name in pkg_name. 
+ Returns a transaction-ID.""" + + baseurl = self.__get_request_url("open/0/") + return self.__start_trans(baseurl, header, client_release, pkg_name) + + def __start_trans(self, baseurl, header, client_release, pkg_name): + """Start a publication transaction.""" + + request_str = quote(pkg_name, "") + requesturl = urljoin(baseurl, request_str) + + headers = {"Client-Release": client_release} + if header: + headers.update(header) + + fobj = self._fetch_url(requesturl, header=headers, failonerror=False) + + try: + fobj.free_buffer = False + fobj.read() + trans_id = fobj.getheader("Transaction-ID", None) + except tx.TransportProtoError as e: + if e.code == http_client.BAD_REQUEST: + exc_type, exc_value, exc_tb = sys.exc_info() try: - # see if repository has file - resp = self._fetch_url_header(requesturl, header) - resp.read() - csize = resp.getheader("Content-Length", None) - chashes = dict( - val.split("=", 1) - for hdr, val in six.iteritems(resp.headers) - if hdr.lower().startswith("x-ipkg-attr") - ) - return (csize, chashes) - except Exception: - # repository transport issue or does not have file - return (None, None) - - def build_refetch_header(self, header): - """For HTTP requests that have failed due to corrupt content, - if that request didn't specify 'Cache-control: no-cache' in - its headers then we can try the request with that additional - header, which can help where a web cache is serving corrupt - content. - """ + e.details = self._parse_html_error(fobj.read()) + except: + # If parse fails, raise original + # exception. + if six.PY2: + six.reraise(exc_value, None, exc_tb) + else: + raise exc_value + raise + finally: + fobj.close() + + return trans_id + + def publish_append(self, header=None, client_release=None, pkg_name=None): + """Begin a publication operation by calling 'append'. + The caller must specify the client's OS release in + client_release, and the package's name in pkg_name. 
+ Returns a transaction-ID.""" + + baseurl = self.__get_request_url("append/0/") + return self.__start_trans(baseurl, header, client_release, pkg_name) + + def publish_rebuild(self, header=None, pub=None): + """Attempt to rebuild the package data and search data in the + repository.""" + + requesturl = self.__get_request_url( + "admin/0", query={"cmd": "rebuild"}, pub=pub + ) + fobj = self._fetch_url(requesturl, header=header, failonerror=False) + self.__check_response_body(fobj) + + def publish_rebuild_indexes(self, header=None, pub=None): + """Attempt to rebuild the search data in the repository.""" + + requesturl = self.__get_request_url( + "admin/0", query={"cmd": "rebuild-indexes"}, pub=pub + ) + fobj = self._fetch_url(requesturl, header=header, failonerror=False) + self.__check_response_body(fobj) + + def publish_rebuild_packages(self, header=None, pub=None): + """Attempt to rebuild the package data in the repository.""" + + requesturl = self.__get_request_url( + "admin/0", query={"cmd": "rebuild-packages"}, pub=pub + ) + fobj = self._fetch_url(requesturl, header=header, failonerror=False) + self.__check_response_body(fobj) + + def publish_refresh(self, header=None, pub=None): + """Attempt to refresh the package data and search data in the + repository.""" + + requesturl = self.__get_request_url( + "admin/0", query={"cmd": "refresh"}, pub=pub + ) + fobj = self._fetch_url(requesturl, header=header, failonerror=False) + self.__check_response_body(fobj) + + def publish_refresh_indexes(self, header=None, pub=None): + """Attempt to refresh the search data in the repository.""" + + if self.supports_version("admin", [0]) > -1: + requesturl = self.__get_request_url( + "admin/0", query={"cmd": "refresh-indexes"}, pub=pub + ) + else: + requesturl = self.__get_request_url("index/0/refresh") + + fobj = self._fetch_url(requesturl, header=header, failonerror=False) + self.__check_response_body(fobj) + + def publish_refresh_packages(self, header=None, pub=None): + """Attempt to refresh the package data in the repository.""" + + requesturl = self.__get_request_url( + "admin/0", query={"cmd": "refresh-packages"}, pub=pub + ) + fobj = self._fetch_url(requesturl, header=header, failonerror=False) + self.__check_response_body(fobj) + + def supports_version(self, op, verlist): + """Returns version-id of highest supported version. + If the version is not supported, or no data is available, + -1 is returned instead.""" + + if not self.has_version_data() or op not in self._verdata: + return -1 + + # This code assumes that both the verlist and verdata + # are sorted in reverse order. This behavior is currently + # implemented in the transport code. + + for v in verlist: + if v in self._verdata[op]: + return v + return -1 + + def touch_manifest(self, mfst, header=None, ccancel=None, pub=None): + """Invoke HTTP HEAD to send manifest intent data.""" + + baseurl = self.__get_request_url("manifest/0/", pub=pub) + requesturl = urljoin(baseurl, mfst) + + resp = self._fetch_url_header(requesturl, header, ccancel=ccancel) + + # response is empty, or should be. + resp.read() + + return True + + def get_compressed_attrs( + self, fhash, header=None, pub=None, trans_id=None, hashes=True + ): + """Given a fhash, returns a tuple of (csize, chashes) where + 'csize' is the size of the file in the repository and 'chashes' + is a dictionary containing any hashes of the compressed data + known by the repository. If the repository cannot provide the + hash information or 'hashes' is False, chashes will be an empty + dictionary. 
If the repository does not have the file, a tuple + of (None, None) will be returned instead.""" + + # If the publisher's prefix isn't contained in trans_id, + # assume the server doesn't have the file. + pfx = getattr(pub, "prefix", None) + if ( + pfx + and trans_id + and quote("pkg://{0}/".format(pfx), safe="") not in trans_id + ): + return (None, None) + + # If caller requests hashes and server supports providing them + # (v2 of file operation), then attempt to retrieve size and + # hashes. Otherwise, fallback to the v0 file operation which + # only returns size (so is faster). + if hashes and self.supports_version("file", [2]) > -1: + version = 2 + else: + version = 0 + + baseurl = self.__get_request_url("file/{0}/".format(version), pub=pub) + requesturl = urljoin(baseurl, fhash) + + try: + # see if repository has file + resp = self._fetch_url_header(requesturl, header) + resp.read() + csize = resp.getheader("Content-Length", None) + chashes = dict( + val.split("=", 1) + for hdr, val in six.iteritems(resp.headers) + if hdr.lower().startswith("x-ipkg-attr") + ) + return (csize, chashes) + except Exception: + # repository transport issue or does not have file + return (None, None) + + def build_refetch_header(self, header): + """For HTTP requests that have failed due to corrupt content, + if that request didn't specify 'Cache-control: no-cache' in + its headers then we can try the request with that additional + header, which can help where a web cache is serving corrupt + content. + """ - if header is None: - header = {} + if header is None: + header = {} - if header.get("Cache-Control", "") != "no-cache": - header["Cache-Control"] = "no-cache" - header["Pragma"] = "no-cache" - return header - return header + if header.get("Cache-Control", "") != "no-cache": + header["Cache-Control"] = "no-cache" + header["Pragma"] = "no-cache" + return header + return header class HTTPSRepo(HTTPRepo): - - def __init__(self, repostats, repouri, engine): - """Create a http repo. Repostats is a RepoStats object. - Repouri is a TransportRepoURI object. Engine is a transport - engine object. 
- - The convenience function new_repo() can be used to create - the correct repo.""" - - HTTPRepo.__init__(self, repostats, repouri, engine) - - # override the download functions to use ssl cert/key - def _add_file_url(self, url, filepath=None, progclass=None, - progtrack=None, header=None, compress=False): - self._engine.add_url(url, filepath=filepath, - progclass=progclass, progtrack=progtrack, - sslcert=self._repouri.ssl_cert, - sslkey=self._repouri.ssl_key, repourl=self._url, - header=header, compressible=compress, - runtime_proxy=self._repouri.runtime_proxy, - proxy=self._repouri.proxy) - - def _fetch_url(self, url, header=None, compress=False, ccancel=None, - failonerror=True): - return self._engine.get_url(url, header=header, - sslcert=self._repouri.ssl_cert, - sslkey=self._repouri.ssl_key, repourl=self._url, - compressible=compress, ccancel=ccancel, - failonerror=failonerror, - runtime_proxy=self._repouri.runtime_proxy, - proxy=self._repouri.proxy) - - def _fetch_url_header(self, url, header=None, ccancel=None, - failonerror=True): - return self._engine.get_url_header(url, header=header, - sslcert=self._repouri.ssl_cert, - sslkey=self._repouri.ssl_key, repourl=self._url, - ccancel=ccancel, failonerror=failonerror, - runtime_proxy=self._repouri.runtime_proxy, - proxy=self._repouri.proxy) - - def _post_url(self, url, data=None, header=None, ccancel=None, - data_fobj=None, data_fp=None, failonerror=True, progclass=None, - progtrack=None): - return self._engine.send_data(url, data=data, header=header, - sslcert=self._repouri.ssl_cert, - sslkey=self._repouri.ssl_key, repourl=self._url, - ccancel=ccancel, data_fobj=data_fobj, - data_fp=data_fp, failonerror=failonerror, - progclass=progclass, progtrack=progtrack, - runtime_proxy=self._repouri.runtime_proxy, - proxy=self._repouri.proxy) + def __init__(self, repostats, repouri, engine): + """Create a http repo. Repostats is a RepoStats object. + Repouri is a TransportRepoURI object. Engine is a transport + engine object. 
+ + The convenience function new_repo() can be used to create + the correct repo.""" + + HTTPRepo.__init__(self, repostats, repouri, engine) + + # override the download functions to use ssl cert/key + def _add_file_url( + self, + url, + filepath=None, + progclass=None, + progtrack=None, + header=None, + compress=False, + ): + self._engine.add_url( + url, + filepath=filepath, + progclass=progclass, + progtrack=progtrack, + sslcert=self._repouri.ssl_cert, + sslkey=self._repouri.ssl_key, + repourl=self._url, + header=header, + compressible=compress, + runtime_proxy=self._repouri.runtime_proxy, + proxy=self._repouri.proxy, + ) + + def _fetch_url( + self, url, header=None, compress=False, ccancel=None, failonerror=True + ): + return self._engine.get_url( + url, + header=header, + sslcert=self._repouri.ssl_cert, + sslkey=self._repouri.ssl_key, + repourl=self._url, + compressible=compress, + ccancel=ccancel, + failonerror=failonerror, + runtime_proxy=self._repouri.runtime_proxy, + proxy=self._repouri.proxy, + ) + + def _fetch_url_header( + self, url, header=None, ccancel=None, failonerror=True + ): + return self._engine.get_url_header( + url, + header=header, + sslcert=self._repouri.ssl_cert, + sslkey=self._repouri.ssl_key, + repourl=self._url, + ccancel=ccancel, + failonerror=failonerror, + runtime_proxy=self._repouri.runtime_proxy, + proxy=self._repouri.proxy, + ) + + def _post_url( + self, + url, + data=None, + header=None, + ccancel=None, + data_fobj=None, + data_fp=None, + failonerror=True, + progclass=None, + progtrack=None, + ): + return self._engine.send_data( + url, + data=data, + header=header, + sslcert=self._repouri.ssl_cert, + sslkey=self._repouri.ssl_key, + repourl=self._url, + ccancel=ccancel, + data_fobj=data_fobj, + data_fp=data_fp, + failonerror=failonerror, + progclass=progclass, + progtrack=progtrack, + runtime_proxy=self._repouri.runtime_proxy, + proxy=self._repouri.proxy, + ) class _FilesystemRepo(TransportRepo): - """Private implementation of transport repository logic for filesystem - repositories. - """ - - def __init__(self, repostats, repouri, engine, frepo=None): - """Create a file repo. Repostats is a RepoStats object. - Repouri is a TransportRepoURI object. Engine is a transport - engine object. If the caller wants to pass a Repository - object instead of having FileRepo create one, it should - pass the object in the frepo argument. - - The convenience function new_repo() can be used to create - the correct repo.""" - - self._frepo = frepo - self._url = repostats.url - self._repouri = repouri - self._engine = engine - self._verdata = None - self.__stats = repostats - - # If caller supplied a Repository object, we're done. Return. 
- if self._frepo: - return - - try: - scheme, netloc, path, params, query, fragment = \ - urlparse(self._repouri.uri, "file", - allow_fragments=0) - path = url2pathname(path) - self._frepo = svr_repo.Repository(read_only=True, - root=path) - except cfg.ConfigError as e: - reason = _("The configuration file for the repository " - "is invalid or incomplete:\n{0}").format(e) - ex = tx.TransportProtoError("file", errno.EINVAL, - reason=reason, repourl=self._url) - self.__record_proto_error(ex) - raise ex - except svr_repo.RepositoryInvalidError as e: - ex = tx.TransportProtoError("file", errno.EINVAL, - reason=str(e), repourl=self._url) - self.__record_proto_error(ex) - raise ex - except Exception as e: - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=str(e), repourl=self._url) - self.__record_proto_error(ex) - raise ex - - def __del__(self): - # Dump search cache if repo goes out of scope. - if self._frepo: - self._frepo.reset_search() - self._frepo = None - - def _add_file_url(self, url, filepath=None, progclass=None, - progtrack=None, header=None, compress=False): - self._engine.add_url(url, filepath=filepath, - progclass=progclass, progtrack=progtrack, repourl=self._url, - header=header, compressible=False) - - def _fetch_url(self, url, header=None, compress=False, ccancel=None, - failonerror=True): - return self._engine.get_url(url, header, repourl=self._url, - compressible=False, ccancel=ccancel, - failonerror=failonerror) - - def _fetch_url_header(self, url, header=None, ccancel=None, - failonerror=True): - return self._engine.get_url_header(url, header, - repourl=self._url, ccancel=ccancel, failonerror=failonerror) - - def __record_proto_error(self, ex): - """Private helper function that records a protocol error that - was raised by the class instead of the transport engine. It - records both that a transaction was initiated and that an - error occurred.""" - + """Private implementation of transport repository logic for filesystem + repositories. + """ + + def __init__(self, repostats, repouri, engine, frepo=None): + """Create a file repo. Repostats is a RepoStats object. + Repouri is a TransportRepoURI object. Engine is a transport + engine object. If the caller wants to pass a Repository + object instead of having FileRepo create one, it should + pass the object in the frepo argument. + + The convenience function new_repo() can be used to create + the correct repo.""" + + self._frepo = frepo + self._url = repostats.url + self._repouri = repouri + self._engine = engine + self._verdata = None + self.__stats = repostats + + # If caller supplied a Repository object, we're done. Return. 
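# --- Editor's note: illustrative sketch, not part of the patch ------------
# The _FilesystemRepo.__init__ body that follows converts a file:// repository
# URI into a local path with urlparse() + url2pathname() before constructing
# svr_repo.Repository.  The same conversion in isolation, using the Python 3
# stdlib names and a made-up URI:
from urllib.parse import urlparse
from urllib.request import url2pathname

uri = "file:///export/repo/omnios"                  # hypothetical URI
parts = urlparse(uri, "file", allow_fragments=False)
root = url2pathname(parts.path)                     # "/export/repo/omnios"
# ---------------------------------------------------------------------------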
+ if self._frepo: + return + + try: + scheme, netloc, path, params, query, fragment = urlparse( + self._repouri.uri, "file", allow_fragments=0 + ) + path = url2pathname(path) + self._frepo = svr_repo.Repository(read_only=True, root=path) + except cfg.ConfigError as e: + reason = _( + "The configuration file for the repository " + "is invalid or incomplete:\n{0}" + ).format(e) + ex = tx.TransportProtoError( + "file", errno.EINVAL, reason=reason, repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + except svr_repo.RepositoryInvalidError as e: + ex = tx.TransportProtoError( + "file", errno.EINVAL, reason=str(e), repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + except Exception as e: + ex = tx.TransportProtoError( + "file", errno.EPROTO, reason=str(e), repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + + def __del__(self): + # Dump search cache if repo goes out of scope. + if self._frepo: + self._frepo.reset_search() + self._frepo = None + + def _add_file_url( + self, + url, + filepath=None, + progclass=None, + progtrack=None, + header=None, + compress=False, + ): + self._engine.add_url( + url, + filepath=filepath, + progclass=progclass, + progtrack=progtrack, + repourl=self._url, + header=header, + compressible=False, + ) + + def _fetch_url( + self, url, header=None, compress=False, ccancel=None, failonerror=True + ): + return self._engine.get_url( + url, + header, + repourl=self._url, + compressible=False, + ccancel=ccancel, + failonerror=failonerror, + ) + + def _fetch_url_header( + self, url, header=None, ccancel=None, failonerror=True + ): + return self._engine.get_url_header( + url, + header, + repourl=self._url, + ccancel=ccancel, + failonerror=failonerror, + ) + + def __record_proto_error(self, ex): + """Private helper function that records a protocol error that + was raised by the class instead of the transport engine. It + records both that a transaction was initiated and that an + error occurred.""" + + self.__stats.record_tx() + self.__stats.record_error(decayable=ex.decayable) + + def add_version_data(self, verdict): + """Cache the information about what versions a repository + supports.""" + + self._verdata = verdict + + def do_search(self, data, header=None, ccancel=None, pub=None): + """Perform a search against repo.""" + + pub_prefix = getattr(pub, "prefix", None) + try: + res_list = self._frepo.search(data, pub=pub_prefix) + except svr_repo.RepositorySearchUnavailableError: + ex = tx.TransportProtoError( + "file", + errno.EAGAIN, + reason=_("Search temporarily unavailable."), + repourl=self._url, + ) + self.__record_proto_error(ex) + raise ex + except sqp.QueryException as e: + ex = tx.TransportProtoError( + "file", errno.EINVAL, reason=str(e), repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + except Exception as e: + ex = tx.TransportProtoError( + "file", errno.EPROTO, reason=str(e), repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + + # In order to be able to have a return code distinguish between + # no results and search unavailable, we need to use a different + # http code. Check and see if there's at least one item in + # the results. If not, set the result code to be NO_CONTENT + # and return. If there is at least one result, put the result + # examined back at the front of the results and stream them + # to the user. 
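# --- Editor's note: illustrative sketch, not part of the patch ------------
# The block that follows peeks at the first search result so an empty result
# set can raise NegativeSearchResult, then pushes the peeked item back with
# itertools.chain so the caller still sees every result.  The same idiom in
# isolation (the result string is made up):
import itertools

def peek(iterator):
    """Return (first_item, equivalent_iterator); raise StopIteration if empty."""
    first = next(iterator)
    return first, itertools.chain([first], iterator)

first, results = peek(iter(["pkg:/web/server"]))
assert list(results) == ["pkg:/web/server"]
# ---------------------------------------------------------------------------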
+ if len(res_list) == 1: + try: + tmp = next(res_list[0]) + res_list = [itertools.chain([tmp], res_list[0])] + except StopIteration: self.__stats.record_tx() - self.__stats.record_error(decayable=ex.decayable) - - def add_version_data(self, verdict): - """Cache the information about what versions a repository - supports.""" - - self._verdata = verdict - - def do_search(self, data, header=None, ccancel=None, pub=None): - """Perform a search against repo.""" - - pub_prefix = getattr(pub, "prefix", None) - try: - res_list = self._frepo.search(data, pub=pub_prefix) - except svr_repo.RepositorySearchUnavailableError: - ex = tx.TransportProtoError("file", errno.EAGAIN, - reason=_("Search temporarily unavailable."), - repourl=self._url) - self.__record_proto_error(ex) - raise ex - except sqp.QueryException as e: - ex = tx.TransportProtoError("file", errno.EINVAL, - reason=str(e), repourl=self._url) - self.__record_proto_error(ex) - raise ex - except Exception as e: - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=str(e), repourl=self._url) - self.__record_proto_error(ex) - raise ex - - # In order to be able to have a return code distinguish between - # no results and search unavailable, we need to use a different - # http code. Check and see if there's at least one item in - # the results. If not, set the result code to be NO_CONTENT - # and return. If there is at least one result, put the result - # examined back at the front of the results and stream them - # to the user. - if len(res_list) == 1: - try: - tmp = next(res_list[0]) - res_list = [itertools.chain([tmp], res_list[0])] - except StopIteration: - self.__stats.record_tx() - raise apx.NegativeSearchResult(self._url) - - def output(): - # Yield the string used to let the client know it's - # talking to a valid search server. - yield str(sqp.Query.VALIDATION_STRING[1]) - for i, res in enumerate(res_list): - for v, return_type, vals in res: - if return_type == \ - sqp.Query.RETURN_ACTIONS: - fmri_str, fv, line = vals - yield "{0} {1} {2} {3} {4}\n".format( - i, return_type, fmri_str, - quote(fv), - line.rstrip()) - elif return_type == \ - sqp.Query.RETURN_PACKAGES: - fmri_str = vals - yield "{0} {1} {2}\n".format( - i, return_type, fmri_str) - return output() - - def get_catalog1(self, filelist, destloc, header=None, ts=None, - progtrack=None, pub=None, revalidate=False, redownload=False): - """Get the files that make up the catalog components - that are listed in 'filelist'. Download the files to - the directory specified in 'destloc'. The caller - may optionally specify a dictionary with header - elements in 'header'. If a conditional get is - to be performed, 'ts' should contain a floating point - value of seconds since the epoch. This protocol - doesn't implment revalidate and redownload. The options - are ignored.""" - - urllist = [] - progclass = None - pub_prefix = getattr(pub, "prefix", None) + raise apx.NegativeSearchResult(self._url) + + def output(): + # Yield the string used to let the client know it's + # talking to a valid search server. 
+ yield str(sqp.Query.VALIDATION_STRING[1]) + for i, res in enumerate(res_list): + for v, return_type, vals in res: + if return_type == sqp.Query.RETURN_ACTIONS: + fmri_str, fv, line = vals + yield "{0} {1} {2} {3} {4}\n".format( + i, return_type, fmri_str, quote(fv), line.rstrip() + ) + elif return_type == sqp.Query.RETURN_PACKAGES: + fmri_str = vals + yield "{0} {1} {2}\n".format(i, return_type, fmri_str) + + return output() + + def get_catalog1( + self, + filelist, + destloc, + header=None, + ts=None, + progtrack=None, + pub=None, + revalidate=False, + redownload=False, + ): + """Get the files that make up the catalog components + that are listed in 'filelist'. Download the files to + the directory specified in 'destloc'. The caller + may optionally specify a dictionary with header + elements in 'header'. If a conditional get is + to be performed, 'ts' should contain a floating point + value of seconds since the epoch. This protocol + doesn't implment revalidate and redownload. The options + are ignored.""" + + urllist = [] + progclass = None + pub_prefix = getattr(pub, "prefix", None) + + if progtrack: + progclass = ProgressCallback + + # create URL for requests + for f in filelist: + try: + url = urlunparse( + ( + "file", + None, + pathname2url(self._frepo.catalog_1(f, pub=pub_prefix)), + None, + None, + None, + ) + ) + except svr_repo.RepositoryError as e: + ex = tx.TransportProtoError( + "file", + errno.EPROTO, + reason=str(e), + repourl=self._url, + request=f, + ) + self.__record_proto_error(ex) + raise ex + + urllist.append(url) + fn = os.path.join(destloc, f) + self._add_file_url( + url, + filepath=fn, + header=header, + progtrack=progtrack, + progclass=progclass, + ) + + try: + while self._engine.pending: + self._engine.run() + except tx.ExcessiveTransientFailure as e: + # Attach a list of failed and successful + # requests to this exception. + errors, success = self._engine.check_status(urllist, True) + + errors = self._annotate_exceptions(errors) + success = self._url_to_request(success) + e.failures = errors + e.success = success + + # Reset the engine before propagating exception. + self._engine.reset() + raise + + errors = self._engine.check_status(urllist) + + # Transient errors are part of standard control flow. + # The repo's caller will look at these and decide whether + # to throw them or not. Permanent failures are raised + # by the transport engine as soon as they occur. + # + # This adds an attribute that describes the request to the + # exception, if we were able to figure it out. + + return self._annotate_exceptions(errors) + + def get_datastream( + self, fhash, version, header=None, ccancel=None, pub=None + ): + """Get a datastream from a repo. 
The name of the + file is given in fhash.""" + + pub_prefix = getattr(pub, "prefix", None) + try: + requesturl = urlunparse( + ( + "file", + None, + pathname2url(self._frepo.file(fhash, pub=pub_prefix)), + None, + None, + None, + ) + ) + except svr_repo.RepositoryFileNotFoundError as e: + ex = tx.TransportProtoError( + "file", + errno.ENOENT, + reason=str(e), + repourl=self._url, + request=fhash, + ) + self.__record_proto_error(ex) + raise ex + except svr_repo.RepositoryError as e: + ex = tx.TransportProtoError( + "file", + errno.EPROTO, + reason=str(e), + repourl=self._url, + request=fhash, + ) + self.__record_proto_error(ex) + raise ex + return self._fetch_url(requesturl, header, ccancel=ccancel) + + def get_publisherinfo(self, header=None, ccancel=None): + """Get publisher information from the repository.""" + + try: + pubs = self._frepo.get_publishers() + buf = cStringIO() + p5i.write(buf, pubs) + except Exception as e: + reason = ( + "Unable to retrieve publisher configuration " + "data:\n{0}".format(e) + ) + ex = tx.TransportProtoError( + "file", errno.EPROTO, reason=reason, repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + buf.seek(0) + return buf + + def get_status(self, header=None, ccancel=None): + """Get status/0 information from the repository.""" + + buf = cStringIO() + try: + rstatus = self._frepo.get_status() + json.dump( + rstatus, buf, ensure_ascii=False, indent=2, sort_keys=True + ) + buf.write("\n") + except Exception as e: + reason = "Unable to retrieve status data:\n{0}".format(e) + ex = tx.TransportProtoError( + "file", errno.EPROTO, reason=reason, repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + buf.seek(0) + return buf + + def get_manifest(self, fmri, header=None, ccancel=None, pub=None): + """Get a manifest from repo. The fmri of the package for the + manifest is given in fmri.""" + + pub_prefix = getattr(pub, "prefix", None) + try: + requesturl = urlunparse( + ( + "file", + None, + pathname2url(self._frepo.manifest(fmri, pub=pub_prefix)), + None, + None, + None, + ) + ) + except svr_repo.RepositoryError as e: + ex = tx.TransportProtoError( + "file", + errno.EPROTO, + reason=str(e), + repourl=self._url, + request=str(fmri), + ) + self.__record_proto_error(ex) + raise ex + + return self._fetch_url(requesturl, header, ccancel=ccancel) + + def get_manifests(self, mfstlist, dest, progtrack=None, pub=None): + """Get manifests named in list. The mfstlist argument contains + tuples (fmri, header). This is so that each manifest may have + unique header information. The destination directory is spec- + ified in the dest argument.""" + + urlmapping = {} + progclass = None + pub_prefix = getattr(pub, "prefix", None) + + if progtrack: + progclass = ManifestProgress + + # Errors that happen before the engine is executed must be + # collected and added to the errors raised during engine + # execution so that batch processing occurs as expected. 
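# --- Editor's note: illustrative sketch, not part of the patch ------------
# get_manifest() above and the get_manifests()/get_files() loops that follow
# all wrap a local path returned by the Repository object into a file URL
# with the six-tuple form of urlunparse().  The same construction in
# isolation (Python 3 stdlib names, made-up path):
from urllib.parse import urlunparse
from urllib.request import pathname2url

path = "/export/repo/file/00/0000deadbeef"          # hypothetical path
url = urlunparse(("file", None, pathname2url(path), None, None, None))
# With an empty netloc this yields "file:/export/repo/file/00/0000deadbeef".
# ---------------------------------------------------------------------------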
+ pre_exec_errors = [] + for fmri, h in mfstlist: + try: + url = urlunparse( + ( + "file", + None, + pathname2url( + self._frepo.manifest(fmri, pub=pub_prefix) + ), + None, + None, + None, + ) + ) + except svr_repo.RepositoryError as e: + ex = tx.TransportProtoError( + "file", + errno.EPROTO, + reason=str(e), + repourl=self._url, + request=str(fmri), + ) + self.__record_proto_error(ex) + pre_exec_errors.append(ex) + continue + urlmapping[url] = fmri + fn = os.path.join(dest, fmri.get_url_path()) + self._add_file_url( + url, + filepath=fn, + header=h, + progtrack=progtrack, + progclass=progclass, + ) + + urllist = urlmapping.keys() + + try: + while self._engine.pending: + self._engine.run() + except tx.ExcessiveTransientFailure as e: + # Attach a list of failed and successful + # requests to this exception. + errors, success = self._engine.check_status(urllist, True) + + errors = self._annotate_exceptions(errors, urlmapping) + errors.extend(pre_exec_errors) + success = self._url_to_request(success, urlmapping) + e.failures = errors + e.success = success + + # Reset the engine before propagating exception. + self._engine.reset() + raise + + errors = self._engine.check_status(urllist) + + # Transient errors are part of standard control flow. + # The repo's caller will look at these and decide whether + # to throw them or not. Permanent failures are raised + # by the transport engine as soon as they occur. + # + # This adds an attribute that describes the request to the + # exception, if we were able to figure it out. + errors = self._annotate_exceptions(errors, urlmapping) + + return errors + pre_exec_errors + + def get_files( + self, filelist, dest, progtrack, version, header=None, pub=None + ): + """Get multiple files from the repo at once. + The files are named by hash and supplied in filelist. + If dest is specified, download to the destination + directory that is given. If progtrack is not None, + it contains a ProgressTracker object for the + downloads.""" + + urllist = [] + progclass = None + pub_prefix = getattr(pub, "prefix", None) + + if progtrack: + progclass = FileProgress + + # Errors that happen before the engine is executed must be + # collected and added to the errors raised during engine + # execution so that batch processing occurs as expected. + pre_exec_errors = [] + for f in filelist: + try: + url = urlunparse( + ( + "file", + None, + pathname2url(self._frepo.file(f, pub=pub_prefix)), + None, + None, + None, + ) + ) + except svr_repo.RepositoryFileNotFoundError as e: + ex = tx.TransportProtoError( + "file", + errno.ENOENT, + reason=str(e), + repourl=self._url, + request=f, + ) + self.__record_proto_error(ex) + pre_exec_errors.append(ex) + continue + except svr_repo.RepositoryError as e: + ex = tx.TransportProtoError( + "file", + errno.EPROTO, + reason=str(e), + repourl=self._url, + request=f, + ) + self.__record_proto_error(ex) + pre_exec_errors.append(ex) + continue + urllist.append(url) + fn = os.path.join(dest, f) + self._add_file_url( + url, + filepath=fn, + progclass=progclass, + progtrack=progtrack, + header=header, + ) + + try: + while self._engine.pending: + self._engine.run() + except tx.ExcessiveTransientFailure as e: + # Attach a list of failed and successful + # requests to this exception. 
+ errors, success = self._engine.check_status(urllist, True) + + errors = self._annotate_exceptions(errors) + errors.extend(pre_exec_errors) + success = self._url_to_request(success) + e.failures = errors + e.success = success + + # Reset the engine before propagating exception. + self._engine.reset() + raise + + errors = self._engine.check_status(urllist) + + # Transient errors are part of standard control flow. + # The repo's caller will look at these and decide whether + # to throw them or not. Permanent failures are raised + # by the transport engine as soon as they occur. + # + # This adds an attribute that describes the request to the + # exception, if we were able to figure it out. + errors = self._annotate_exceptions(errors) + + return errors + pre_exec_errors + + def get_url(self): + """Returns the repo's url.""" + + return self._url + + def get_repouri_key(self): + """Returns a key from the TransportRepoURI that can be + used in a dictionary""" + + return self._repouri.key() + + def get_versions(self, header=None, ccancel=None): + """Query the repo for versions information. + Returns a file-like object.""" + + buf = cStringIO() + vops = { + "abandon": ["0"], + "add": ["0"], + "admin": ["0"], + "append": ["0"], + "catalog": ["1"], + "close": ["0"], + "file": ["0", "1"], + "manifest": ["0", "1"], + "open": ["0"], + "publisher": ["0", "1"], + "search": ["1"], + "status": ["0"], + "versions": ["0"], + } - if progtrack: - progclass = ProgressCallback - - # create URL for requests - for f in filelist: - try: - url = urlunparse(("file", None, - pathname2url(self._frepo.catalog_1(f, - pub=pub_prefix)), None, None, None)) - except svr_repo.RepositoryError as e: - ex = tx.TransportProtoError("file", - errno.EPROTO, reason=str(e), - repourl=self._url, request=f) - self.__record_proto_error(ex) - raise ex - - urllist.append(url) - fn = os.path.join(destloc, f) - self._add_file_url(url, filepath=fn, header=header, - progtrack=progtrack, progclass=progclass) + buf.write("pkg-server {0}\n".format(pkg.VERSION)) + buf.write( + "\n".join( + "{0} {1}".format(op, " ".join(vers)) + for op, vers in six.iteritems(vops) + ) + + "\n" + ) + buf.seek(0) + self.__stats.record_tx() + return buf + + def has_version_data(self): + """Returns true if this repo knows its version information.""" + + return self._verdata is not None + + def publish_add(self, action, header=None, progtrack=None, trans_id=None): + """The publish operation that adds an action and its + payload (if applicable) to an existing transaction in a + repository. The action must be populated with a data property. + Callers may supply a header, and should supply a transaction + id in trans_id.""" + + # Calling any publication operation sets read_only to False. 
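# --- Editor's note: illustrative sketch, not part of the patch ------------
# get_versions() above emits one "op ver ver ..." line per operation, and
# supports_version() (shown for both repo classes in this patch) later picks
# the first entry of verlist present in the cached version data.  A minimal
# sketch of that lookup, assuming verdata holds integer version lists sorted
# in reverse order as the transport code expects:
verdata = {"file": [2, 1, 0], "manifest": [1, 0]}    # hypothetical cache

def supports_version(verdata, op, verlist):
    if op not in verdata:
        return -1
    for v in verlist:
        if v in verdata[op]:
            return v
    return -1

assert supports_version(verdata, "file", [2]) == 2
assert supports_version(verdata, "catalog", [1]) == -1
# ---------------------------------------------------------------------------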
+ self._frepo.read_only = False + + progclass = None + if progtrack: + progclass = FileProgress + progtrack = progclass(progtrack) + + try: + self._frepo.add(trans_id, action) + except svr_repo.RepositoryError as e: + if progtrack: + progtrack.abort() + raise tx.TransportOperationError(str(e)) + else: + if progtrack: + sz = int(action.attrs.get("pkg.size", 0)) + progtrack.progress_callback(0, 0, sz, sz) + + def publish_add_file( + self, pth, header=None, trans_id=None, basename=None, progtrack=None + ): + """The publish operation that adds a file to an existing + transaction.""" + + progclass = None + if progtrack: + progclass = FileProgress + progtrack = progclass(progtrack) + sz = int(os.path.getsize(pth)) + + try: + self._frepo.add_file(trans_id, pth, basename, size=sz) + except svr_repo.RepositoryError as e: + if progtrack: + progtrack.abort() + raise tx.TransportOperationError(str(e)) + else: + if progtrack: + progtrack.progress_callback(0, 0, sz, sz) + + def publish_add_manifest(self, pth, header=None, trans_id=None): + """The publish operation that adds a manifest to an existing + transaction.""" + + try: + self._frepo.add_manifest(trans_id, pth) + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + def publish_abandon(self, header=None, trans_id=None): + """The abandon operation, that tells a Repository to abort + the current transaction. The caller must specify the + transaction id in trans_id. Returns a (publish-state, fmri) + tuple.""" + + # Calling any publication operation sets read_only to False. + self._frepo.read_only = False + + try: + pkg_state = self._frepo.abandon(trans_id) + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + return None, pkg_state + + def publish_close(self, header=None, trans_id=None, add_to_catalog=False): + """The close operation tells the Repository to commit + the transaction identified by trans_id. The caller may + specify add_to_catalog, if needed. This method returns a + (publish-state, fmri) tuple.""" + + # Calling any publication operation sets read_only to False. + self._frepo.read_only = False + + try: + pkg_fmri, pkg_state = self._frepo.close( + trans_id, add_to_catalog=add_to_catalog + ) + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + return pkg_fmri, pkg_state + + def publish_open(self, header=None, client_release=None, pkg_name=None): + """Begin a publication operation by calling 'open'. + The caller must specify the client's OS release in + client_release, and the package's name in pkg_name. + Returns a transaction-ID string.""" + + # Calling any publication operation sets read_only to False. + self._frepo.read_only = False + + try: + trans_id = self._frepo.open(client_release, pkg_name) + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + return trans_id + + def publish_append(self, header=None, client_release=None, pkg_name=None): + # Calling any publication operation sets read_only to False. + self._frepo.read_only = False + + try: + trans_id = self._frepo.append(client_release, pkg_name) + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + return trans_id + + def publish_rebuild(self, header=None, pub=None): + """Attempt to rebuild the package data and search data in the + repository.""" + + # Calling any publication operation sets read_only to False. 
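# --- Editor's note: illustrative sketch, not part of the patch ------------
# Taken together, the publish_* methods above form the transaction lifecycle
# a publishing client drives: open, add content, then close (or abandon on
# failure).  A hedged usage sketch against an already-constructed repo
# object; "repo", the release string, and the package name are hypothetical:
trans_id = repo.publish_open(client_release="5.11", pkg_name="mypkg")
repo.publish_add_file("/tmp/payload", trans_id=trans_id, basename="payload")
result = repo.publish_close(trans_id=trans_id, add_to_catalog=True)
# publish_close() documents its return value as a (publish-state, fmri) tuple.
# If something goes wrong before close, the caller can roll back instead:
#   repo.publish_abandon(trans_id=trans_id)
# ---------------------------------------------------------------------------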
+ self._frepo.read_only = False + + pub_prefix = getattr(pub, "prefix", None) + try: + self._frepo.rebuild( + pub=pub_prefix, build_catalog=True, build_index=True + ) + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + def publish_rebuild_indexes(self, header=None, pub=None): + """Attempt to rebuild the search data in the repository.""" + + # Calling any publication operation sets read_only to False. + self._frepo.read_only = False + + pub_prefix = getattr(pub, "prefix", None) + try: + self._frepo.rebuild( + pub=pub_prefix, build_catalog=False, build_index=True + ) + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + def publish_rebuild_packages(self, header=None, pub=None): + """Attempt to rebuild the package data in the repository.""" + + # Calling any publication operation sets read_only to False. + self._frepo.read_only = False + + pub_prefix = getattr(pub, "prefix", None) + try: + self._frepo.rebuild( + pub=pub_prefix, build_catalog=True, build_index=False + ) + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + def publish_refresh(self, header=None, pub=None): + """Attempt to refresh the package data and search data in the + repository.""" + + # Calling any publication operation sets read_only to False. + self._frepo.read_only = False + + pub_prefix = getattr(pub, "prefix", None) + try: + self._frepo.add_content(pub=pub_prefix, refresh_index=True) + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + def publish_refresh_indexes(self, header=None, pub=None): + """Attempt to refresh the search data in the repository.""" + + # Calling any publication operation sets read_only to False. + self._frepo.read_only = False + + try: + self._frepo.refresh_index() + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + def publish_refresh_packages(self, header=None, pub=None): + """Attempt to refresh the package data in the repository.""" + + # Calling any publication operation sets read_only to False. + self._frepo.read_only = False + + pub_prefix = getattr(pub, "prefix", None) + try: + self._frepo.add_content(pub=pub_prefix, refresh_index=False) + except svr_repo.RepositoryError as e: + raise tx.TransportOperationError(str(e)) + + def supports_version(self, op, verlist): + """Returns version-id of highest supported version. + If the version is not supported, or no data is available, + -1 is returned instead.""" + + if not self.has_version_data() or op not in self._verdata: + return -1 + + # This code assumes that both the verlist and verdata + # are sorted in reverse order. This behavior is currently + # implemented in the transport code. + + for v in verlist: + if v in self._verdata[op]: + return v + return -1 + + def touch_manifest(self, mfst, header=None, ccancel=None, pub=None): + """No-op for file://.""" + + return True + + def get_compressed_attrs( + self, fhash, header=None, pub=None, trans_id=None, hashes=True + ): + """Given a fhash, returns a tuple of (csize, chashes) where + 'csize' is the size of the file in the repository and 'chashes' + is a dictionary containing any hashes of the compressed data + known by the repository. If the repository cannot provide the + hash information or 'hashes' is False, chashes will be an empty + dictionary. 
If the repository does not have the file, a tuple + of (None, None) will be returned instead.""" + + # If the publisher's prefix isn't contained in trans_id, + # assume the server doesn't have the file. + pfx = getattr(pub, "prefix", None) + if ( + pfx + and trans_id + and quote("pkg://{0}/".format(pfx), safe="") not in trans_id + ): + return (None, None) + + try: + # see if repository has file + fpath = self._frepo.file(fhash, pub=pfx) + if hashes: + csize, chashes = compute_compressed_attrs( + fhash, file_path=fpath + ) + else: + csize = os.stat(fpath).st_size + chashes = EmptyDict + return (csize, chashes) + except ( + EnvironmentError, + svr_repo.RepositoryError, + svr_repo.RepositoryFileNotFoundError, + ): + # repository transport issue or does not have file + return (None, None) - try: - while self._engine.pending: - self._engine.run() - except tx.ExcessiveTransientFailure as e: - # Attach a list of failed and successful - # requests to this exception. - errors, success = self._engine.check_status(urllist, - True) - - errors = self._annotate_exceptions(errors) - success = self._url_to_request(success) - e.failures = errors - e.success = success - - # Reset the engine before propagating exception. - self._engine.reset() - raise - - errors = self._engine.check_status(urllist) - - # Transient errors are part of standard control flow. - # The repo's caller will look at these and decide whether - # to throw them or not. Permanent failures are raised - # by the transport engine as soon as they occur. - # - # This adds an attribute that describes the request to the - # exception, if we were able to figure it out. - - return self._annotate_exceptions(errors) - - def get_datastream(self, fhash, version, header=None, ccancel=None, pub=None): - """Get a datastream from a repo. 
The name of the - file is given in fhash.""" - - pub_prefix = getattr(pub, "prefix", None) - try: - requesturl = urlunparse(("file", None, - pathname2url(self._frepo.file(fhash, - pub=pub_prefix)), None, None, None)) - except svr_repo.RepositoryFileNotFoundError as e: - ex = tx.TransportProtoError("file", errno.ENOENT, - reason=str(e), repourl=self._url, request=fhash) - self.__record_proto_error(ex) - raise ex - except svr_repo.RepositoryError as e: - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=str(e), repourl=self._url, request=fhash) - self.__record_proto_error(ex) - raise ex - return self._fetch_url(requesturl, header, ccancel=ccancel) - - def get_publisherinfo(self, header=None, ccancel=None): - """Get publisher information from the repository.""" + def build_refetch_header(self, header): + """Pointless to attempt refetch of corrupt content for + this protocol.""" - try: - pubs = self._frepo.get_publishers() - buf = cStringIO() - p5i.write(buf, pubs) - except Exception as e: - reason = "Unable to retrieve publisher configuration " \ - "data:\n{0}".format(e) - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=reason, repourl=self._url) - self.__record_proto_error(ex) - raise ex - buf.seek(0) - return buf - - def get_status(self, header=None, ccancel=None): - """Get status/0 information from the repository.""" - - buf = cStringIO() - try: - rstatus = self._frepo.get_status() - json.dump(rstatus, buf, ensure_ascii=False, indent=2, - sort_keys=True) - buf.write("\n") - except Exception as e: - reason = "Unable to retrieve status data:\n{0}".format(e) - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=reason, repourl=self._url) - self.__record_proto_error(ex) - raise ex - buf.seek(0) - return buf - - def get_manifest(self, fmri, header=None, ccancel=None, pub=None): - """Get a manifest from repo. The fmri of the package for the - manifest is given in fmri.""" - - pub_prefix = getattr(pub, "prefix", None) - try: - requesturl = urlunparse(("file", None, - pathname2url(self._frepo.manifest(fmri, - pub=pub_prefix)), None, None, None)) - except svr_repo.RepositoryError as e: - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=str(e), repourl=self._url, request=str(fmri)) - self.__record_proto_error(ex) - raise ex - - return self._fetch_url(requesturl, header, ccancel=ccancel) - - def get_manifests(self, mfstlist, dest, progtrack=None, pub=None): - """Get manifests named in list. The mfstlist argument contains - tuples (fmri, header). This is so that each manifest may have - unique header information. The destination directory is spec- - ified in the dest argument.""" - - urlmapping = {} - progclass = None - pub_prefix = getattr(pub, "prefix", None) + return header - if progtrack: - progclass = ManifestProgress - - # Errors that happen before the engine is executed must be - # collected and added to the errors raised during engine - # execution so that batch processing occurs as expected. 
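The comment above describes the batching pattern that both the old and the reformatted get_manifests()/get_files() follow: requests that fail while being set up are recorded immediately, the transport engine runs whatever could be queued, and the two error lists are merged so the caller sees a single batch result. A stripped-down sketch of that pattern (all names below are hypothetical, not part of this module):

def fetch_batch(build_request, run_engine, items):
    pre_exec_errors = []
    queued = []
    for item in items:
        try:
            queued.append(build_request(item))
        except Exception as e:          # stands in for RepositoryError
            pre_exec_errors.append(e)   # record it and keep batching
    # Transient engine errors come back as a list rather than being raised.
    engine_errors = run_engine(queued)
    return engine_errors + pre_exec_errors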
- pre_exec_errors = [] - for fmri, h in mfstlist: - try: - url = urlunparse(("file", None, - pathname2url(self._frepo.manifest( - fmri, pub=pub_prefix)), None, None, None)) - except svr_repo.RepositoryError as e: - ex = tx.TransportProtoError("file", - errno.EPROTO, reason=str(e), - repourl=self._url, request=str(fmri)) - self.__record_proto_error(ex) - pre_exec_errors.append(ex) - continue - urlmapping[url] = fmri - fn = os.path.join(dest, fmri.get_url_path()) - self._add_file_url(url, filepath=fn, header=h, - progtrack=progtrack, progclass=progclass) - - urllist = urlmapping.keys() - try: - while self._engine.pending: - self._engine.run() - except tx.ExcessiveTransientFailure as e: - # Attach a list of failed and successful - # requests to this exception. - errors, success = self._engine.check_status(urllist, - True) - - errors = self._annotate_exceptions(errors, urlmapping) - errors.extend(pre_exec_errors) - success = self._url_to_request(success, urlmapping) - e.failures = errors - e.success = success - - # Reset the engine before propagating exception. - self._engine.reset() - raise - - errors = self._engine.check_status(urllist) - - # Transient errors are part of standard control flow. - # The repo's caller will look at these and decide whether - # to throw them or not. Permanent failures are raised - # by the transport engine as soon as they occur. - # - # This adds an attribute that describes the request to the - # exception, if we were able to figure it out. - errors = self._annotate_exceptions(errors, urlmapping) - - return errors + pre_exec_errors - - def get_files(self, filelist, dest, progtrack, version, header=None, pub=None): - """Get multiple files from the repo at once. - The files are named by hash and supplied in filelist. - If dest is specified, download to the destination - directory that is given. If progtrack is not None, - it contains a ProgressTracker object for the - downloads.""" - - urllist = [] - progclass = None - pub_prefix = getattr(pub, "prefix", None) +class _ArchiveRepo(TransportRepo): + """Private implementation of transport repository logic for repositories + contained within an archive. + """ + + def __init__(self, repostats, repouri, engine): + """Create a file repo. Repostats is a RepoStats object. + Repouri is a TransportRepoURI object. Engine is a transport + engine object. + + The convenience function new_repo() can be used to create + the correct repo.""" + + self._arc = None + self._url = repostats.url + self._repouri = repouri + self._engine = engine + self._verdata = None + self.__stats = repostats + + try: + scheme, netloc, path, params, query, fragment = urlparse( + self._repouri.uri, "file", allow_fragments=0 + ) + # Path must be rstripped of separators to be used as + # a file. + path = url2pathname(path.rstrip(os.path.sep)) + self._arc = pkg.p5p.Archive(path, mode="r") + except pkg.p5p.InvalidArchive as e: + ex = tx.TransportProtoError( + "file", errno.EINVAL, reason=str(e), repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + except Exception as e: + ex = tx.TransportProtoError( + "file", errno.EPROTO, reason=str(e), repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + + def __record_proto_error(self, ex): + """Private helper function that records a protocol error that + was raised by the class instead of the transport engine. 
It + records both that a transaction was initiated and that an + error occurred.""" + + self.__stats.record_tx() + self.__stats.record_error(decayable=ex.decayable) + + def add_version_data(self, verdict): + """Cache the information about what versions a repository + supports.""" + + self._verdata = verdict + + def get_status(self, header=None, ccancel=None): + """Get the archive status.""" + + pubsinfo = {} + arcdata = { + "repository": { + "publishers": pubsinfo, + "version": self._arc.version, # Version of archive. + } + } - if progtrack: - progclass = FileProgress - - # Errors that happen before the engine is executed must be - # collected and added to the errors raised during engine - # execution so that batch processing occurs as expected. - pre_exec_errors = [] - for f in filelist: - try: - url = urlunparse(("file", None, - pathname2url(self._frepo.file(f, - pub=pub_prefix)), None, None, None)) - except svr_repo.RepositoryFileNotFoundError as e: - ex = tx.TransportProtoError("file", - errno.ENOENT, reason=str(e), - repourl=self._url, request=f) - self.__record_proto_error(ex) - pre_exec_errors.append(ex) - continue - except svr_repo.RepositoryError as e: - ex = tx.TransportProtoError("file", - errno.EPROTO, reason=str(e), - repourl=self._url, request=f) - self.__record_proto_error(ex) - pre_exec_errors.append(ex) - continue - urllist.append(url) - fn = os.path.join(dest, f) - self._add_file_url(url, filepath=fn, - progclass=progclass, progtrack=progtrack, - header=header) + for pub in self._arc.get_publishers(): + try: + # Create temporary directory. + tmpdir = tempfile.mkdtemp(prefix="tmp.repo.") + # Get catalog. + self.get_catalog1( + ["catalog.attrs"], tmpdir, pub=pub, header=header + ) - try: - while self._engine.pending: - self._engine.run() - except tx.ExcessiveTransientFailure as e: - # Attach a list of failed and successful - # requests to this exception. - errors, success = self._engine.check_status(urllist, - True) - - errors = self._annotate_exceptions(errors) - errors.extend(pre_exec_errors) - success = self._url_to_request(success) - e.failures = errors - e.success = success - - # Reset the engine before propagating exception. - self._engine.reset() - raise - - errors = self._engine.check_status(urllist) - - # Transient errors are part of standard control flow. - # The repo's caller will look at these and decide whether - # to throw them or not. Permanent failures are raised - # by the transport engine as soon as they occur. - # - # This adds an attribute that describes the request to the - # exception, if we were able to figure it out. - errors = self._annotate_exceptions(errors) - - return errors + pre_exec_errors - - def get_url(self): - """Returns the repo's url.""" - - return self._url - - def get_repouri_key(self): - """Returns a key from the TransportRepoURI that can be - used in a dictionary""" - - return self._repouri.key() - - def get_versions(self, header=None, ccancel=None): - """Query the repo for versions information. 
- Returns a file-like object.""" - - buf = cStringIO() - vops = { - "abandon": ["0"], - "add": ["0"], - "admin": ["0"], - "append": ["0"], - "catalog": ["1"], - "close": ["0"], - "file": ["0", "1"], - "manifest": ["0", "1"], - "open": ["0"], - "publisher": ["0", "1"], - "search": ["1"], - "status": ["0"], - "versions": ["0"], + cat = pkg.catalog.Catalog(meta_root=tmpdir) + pubinfo = {} + pubinfo[ + "last-catalog-update" + ] = pkg.catalog.datetime_to_basic_ts(cat.last_modified) + pubinfo["package-count"] = cat.package_count + pubinfo["status"] = "online" + pubsinfo[pub.prefix] = { + "package-count": cat.package_count, + "last-catalog-update": pkg.catalog.datetime_to_basic_ts( + cat.last_modified + ), + "status": "online", } - - buf.write("pkg-server {0}\n".format(pkg.VERSION)) - buf.write("\n".join( - "{0} {1}".format(op, " ".join(vers)) - for op, vers in six.iteritems(vops) - ) + "\n") - buf.seek(0) - self.__stats.record_tx() - return buf - - def has_version_data(self): - """Returns true if this repo knows its version information.""" - - return self._verdata is not None - - def publish_add(self, action, header=None, progtrack=None, - trans_id=None): - """The publish operation that adds an action and its - payload (if applicable) to an existing transaction in a - repository. The action must be populated with a data property. - Callers may supply a header, and should supply a transaction - id in trans_id.""" - - # Calling any publication operation sets read_only to False. - self._frepo.read_only = False - - progclass = None + finally: + # Remove temporary directory if possible. + shutil.rmtree(tmpdir, ignore_errors=True) + + buf = cStringIO() + try: + json.dump( + arcdata, buf, ensure_ascii=False, indent=2, sort_keys=True + ) + buf.write("\n") + except Exception as e: + reason = "Unable to retrieve status data:\n{0}".format(e) + ex = tx.TransportProtoError( + "file", errno.EPROTO, reason=reason, repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + buf.seek(0) + return buf + + def get_catalog1( + self, + filelist, + destloc, + header=None, + ts=None, + progtrack=None, + pub=None, + revalidate=False, + redownload=False, + ): + """Get the files that make up the catalog components + that are listed in 'filelist'. Download the files to + the directory specified in 'destloc'. The caller + may optionally specify a dictionary with header + elements in 'header'. If a conditional get is + to be performed, 'ts' should contain a floating point + value of seconds since the epoch. This protocol + doesn't implment revalidate and redownload. 
The options + are ignored.""" + + pub_prefix = getattr(pub, "prefix", None) + errors = [] + for f in filelist: + try: + self._arc.extract_catalog1(f, destloc, pub=pub_prefix) if progtrack: - progclass = FileProgress - progtrack = progclass(progtrack) - - try: - self._frepo.add(trans_id, action) - except svr_repo.RepositoryError as e: - if progtrack: - progtrack.abort() - raise tx.TransportOperationError(str(e)) - else: - if progtrack: - sz = int(action.attrs.get("pkg.size", 0)) - progtrack.progress_callback(0, 0, sz, sz) - - def publish_add_file(self, pth, header=None, trans_id=None, - basename=None, progtrack=None): - """The publish operation that adds a file to an existing - transaction.""" - - progclass = None + fs = os.stat(os.path.join(destloc, f)) + progtrack.refresh_progress(pub, fs.st_size) + except pkg.p5p.UnknownArchiveFiles as e: + ex = tx.TransportProtoError( + "file", + errno.ENOENT, + reason=str(e), + repourl=self._url, + request=f, + ) + self.__record_proto_error(ex) + errors.append(ex) + continue + except Exception as e: + ex = tx.TransportProtoError( + "file", + errno.EPROTO, + reason=str(e), + repourl=self._url, + request=f, + ) + self.__record_proto_error(ex) + errors.append(ex) + continue + return errors + + def get_datastream( + self, fhash, version, header=None, ccancel=None, pub=None + ): + """Get a datastream from a repo. The name of the file is given + in fhash.""" + + pub_prefix = getattr(pub, "prefix", None) + try: + return self._arc.get_package_file(fhash, pub=pub_prefix) + except pkg.p5p.UnknownArchiveFiles as e: + ex = tx.TransportProtoError( + "file", + errno.ENOENT, + reason=str(e), + repourl=self._url, + request=fhash, + ) + self.__record_proto_error(ex) + raise ex + except Exception as e: + ex = tx.TransportProtoError( + "file", + errno.EPROTO, + reason=str(e), + repourl=self._url, + request=fhash, + ) + self.__record_proto_error(ex) + raise ex + + def get_publisherinfo(self, header=None, ccancel=None): + """Get publisher information from the repository.""" + + try: + pubs = self._arc.get_publishers() + buf = cStringIO() + p5i.write(buf, pubs) + except Exception as e: + reason = ( + "Unable to retrieve publisher configuration " + "data:\n{0}".format(e) + ) + ex = tx.TransportProtoError( + "file", errno.EPROTO, reason=reason, repourl=self._url + ) + self.__record_proto_error(ex) + raise ex + buf.seek(0) + return buf + + def get_manifest(self, fmri, header=None, ccancel=None, pub=None): + """Get a manifest from repo. The fmri of the package for the + manifest is given in fmri.""" + + try: + return self._arc.get_package_manifest(fmri, raw=True) + except pkg.p5p.UnknownPackageManifest as e: + ex = tx.TransportProtoError( + "file", + errno.ENOENT, + reason=str(e), + repourl=self._url, + request=fmri, + ) + self.__record_proto_error(ex) + raise ex + except Exception as e: + ex = tx.TransportProtoError( + "file", + errno.EPROTO, + reason=str(e), + repourl=self._url, + request=fmri, + ) + self.__record_proto_error(ex) + raise ex + + def get_manifests(self, mfstlist, dest, progtrack=None, pub=None): + """Get manifests named in list. The mfstlist argument contains + tuples (fmri, header). This is so that each manifest may have + unique header information. 
The destination directory is spec- + ified in the dest argument.""" + + errors = [] + for fmri, h in mfstlist: + try: + self._arc.extract_package_manifest( + fmri, dest, filename=fmri.get_url_path() + ) if progtrack: - progclass = FileProgress - progtrack = progclass(progtrack) - sz = int(os.path.getsize(pth)) - - try: - self._frepo.add_file(trans_id, pth, basename, size=sz) - except svr_repo.RepositoryError as e: - if progtrack: - progtrack.abort() - raise tx.TransportOperationError(str(e)) - else: - if progtrack: - - progtrack.progress_callback(0, 0, sz, sz) - - def publish_add_manifest(self, pth, header=None, trans_id=None): - """The publish operation that adds a manifest to an existing - transaction.""" - - try: - self._frepo.add_manifest(trans_id, pth) - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - def publish_abandon(self, header=None, trans_id=None): - """The abandon operation, that tells a Repository to abort - the current transaction. The caller must specify the - transaction id in trans_id. Returns a (publish-state, fmri) - tuple.""" - - # Calling any publication operation sets read_only to False. - self._frepo.read_only = False - - try: - pkg_state = self._frepo.abandon(trans_id) - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - return None, pkg_state - - def publish_close(self, header=None, trans_id=None, - add_to_catalog=False): - """The close operation tells the Repository to commit - the transaction identified by trans_id. The caller may - specify add_to_catalog, if needed. This method returns a - (publish-state, fmri) tuple.""" - - # Calling any publication operation sets read_only to False. - self._frepo.read_only = False - - try: - pkg_fmri, pkg_state = self._frepo.close(trans_id, - add_to_catalog=add_to_catalog) - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - return pkg_fmri, pkg_state - - def publish_open(self, header=None, client_release=None, pkg_name=None): - """Begin a publication operation by calling 'open'. - The caller must specify the client's OS release in - client_release, and the package's name in pkg_name. - Returns a transaction-ID string.""" - - # Calling any publication operation sets read_only to False. - self._frepo.read_only = False - - try: - trans_id = self._frepo.open(client_release, pkg_name) - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - return trans_id - - def publish_append(self, header=None, client_release=None, - pkg_name=None): - - # Calling any publication operation sets read_only to False. - self._frepo.read_only = False - - try: - trans_id = self._frepo.append(client_release, pkg_name) - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - return trans_id - - def publish_rebuild(self, header=None, pub=None): - """Attempt to rebuild the package data and search data in the - repository.""" - - # Calling any publication operation sets read_only to False. - self._frepo.read_only = False - - pub_prefix = getattr(pub, "prefix", None) - try: - self._frepo.rebuild(pub=pub_prefix, - build_catalog=True, build_index=True) - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - def publish_rebuild_indexes(self, header=None, pub=None): - """Attempt to rebuild the search data in the repository.""" - - # Calling any publication operation sets read_only to False. 
- self._frepo.read_only = False - - pub_prefix = getattr(pub, "prefix", None) - try: - self._frepo.rebuild(pub=pub_prefix, - build_catalog=False, build_index=True) - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - def publish_rebuild_packages(self, header=None, pub=None): - """Attempt to rebuild the package data in the repository.""" - - # Calling any publication operation sets read_only to False. - self._frepo.read_only = False - - pub_prefix = getattr(pub, "prefix", None) - try: - self._frepo.rebuild(pub=pub_prefix, - build_catalog=True, build_index=False) - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - def publish_refresh(self, header=None, pub=None): - """Attempt to refresh the package data and search data in the - repository.""" - - # Calling any publication operation sets read_only to False. - self._frepo.read_only = False - - pub_prefix = getattr(pub, "prefix", None) - try: - self._frepo.add_content(pub=pub_prefix, - refresh_index=True) - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - def publish_refresh_indexes(self, header=None, pub=None): - """Attempt to refresh the search data in the repository.""" - - # Calling any publication operation sets read_only to False. - self._frepo.read_only = False - - try: - self._frepo.refresh_index() - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - def publish_refresh_packages(self, header=None, pub=None): - """Attempt to refresh the package data in the repository.""" - - # Calling any publication operation sets read_only to False. - self._frepo.read_only = False - - pub_prefix = getattr(pub, "prefix", None) - try: - self._frepo.add_content(pub=pub_prefix, - refresh_index=False) - except svr_repo.RepositoryError as e: - raise tx.TransportOperationError(str(e)) - - def supports_version(self, op, verlist): - """Returns version-id of highest supported version. - If the version is not supported, or no data is available, - -1 is returned instead.""" - - if not self.has_version_data() or op not in self._verdata: - return -1 - - # This code assumes that both the verlist and verdata - # are sorted in reverse order. This behavior is currently - # implemented in the transport code. - - for v in verlist: - if v in self._verdata[op]: - return v - return -1 - - def touch_manifest(self, mfst, header=None, ccancel=None, pub=None): - """No-op for file://.""" - - return True - - def get_compressed_attrs(self, fhash, header=None, pub=None, - trans_id=None, hashes=True): - """Given a fhash, returns a tuple of (csize, chashes) where - 'csize' is the size of the file in the repository and 'chashes' - is a dictionary containing any hashes of the compressed data - known by the repository. If the repository cannot provide the - hash information or 'hashes' is False, chashes will be an empty - dictionary. If the repository does not have the file, a tuple - of (None, None) will be returned instead.""" - - # If the publisher's prefix isn't contained in trans_id, - # assume the server doesn't have the file. 
- pfx = getattr(pub, "prefix", None) - if (pfx and trans_id and - quote("pkg://{0}/".format(pfx), safe='') not in trans_id): - return (None, None) - - try: - # see if repository has file - fpath = self._frepo.file(fhash, pub=pfx) - if hashes: - csize, chashes = compute_compressed_attrs(fhash, - file_path=fpath) - else: - csize = os.stat(fpath).st_size - chashes = EmptyDict - return (csize, chashes) - except (EnvironmentError, - svr_repo.RepositoryError, - svr_repo.RepositoryFileNotFoundError): - # repository transport issue or does not have file - return (None, None) - - def build_refetch_header(self, header): - """Pointless to attempt refetch of corrupt content for - this protocol.""" - - return header - -class _ArchiveRepo(TransportRepo): - """Private implementation of transport repository logic for repositories - contained within an archive. - """ - - def __init__(self, repostats, repouri, engine): - """Create a file repo. Repostats is a RepoStats object. - Repouri is a TransportRepoURI object. Engine is a transport - engine object. - - The convenience function new_repo() can be used to create - the correct repo.""" - - self._arc = None - self._url = repostats.url - self._repouri = repouri - self._engine = engine - self._verdata = None - self.__stats = repostats - - try: - scheme, netloc, path, params, query, fragment = \ - urlparse(self._repouri.uri, "file", - allow_fragments=0) - # Path must be rstripped of separators to be used as - # a file. - path = url2pathname(path.rstrip(os.path.sep)) - self._arc = pkg.p5p.Archive(path, mode="r") - except pkg.p5p.InvalidArchive as e: - ex = tx.TransportProtoError("file", errno.EINVAL, - reason=str(e), repourl=self._url) - self.__record_proto_error(ex) - raise ex - except Exception as e: - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=str(e), repourl=self._url) - self.__record_proto_error(ex) - raise ex - - def __record_proto_error(self, ex): - """Private helper function that records a protocol error that - was raised by the class instead of the transport engine. It - records both that a transaction was initiated and that an - error occurred.""" + fs = os.stat(os.path.join(dest, fmri.get_url_path())) + progtrack.manifest_fetch_progress(completion=True) + except pkg.p5p.UnknownPackageManifest as e: + ex = tx.TransportProtoError( + "file", + errno.ENOENT, + reason=str(e), + repourl=self._url, + request=fmri, + ) + self.__record_proto_error(ex) + errors.append(ex) + continue + except Exception as e: + ex = tx.TransportProtoError( + "file", + errno.EPROTO, + reason=str(e), + repourl=self._url, + request=fmri, + ) + self.__record_proto_error(ex) + errors.append(ex) + continue + return errors + + def get_files( + self, filelist, dest, progtrack, version, header=None, pub=None + ): + """Get multiple files from the repo at once. + The files are named by hash and supplied in filelist. + If dest is specified, download to the destination + directory that is given. 
If progtrack is not None, + it contains a ProgressTracker object for the + downloads.""" + + pub_prefix = getattr(pub, "prefix", None) + errors = [] + for f in filelist: + try: + self._arc.extract_package_files([f], dest, pub=pub_prefix) + if progtrack: + fs = os.stat(os.path.join(dest, f)) + progtrack.download_add_progress(1, fs.st_size) + except pkg.p5p.UnknownArchiveFiles as e: + ex = tx.TransportProtoError( + "file", + errno.ENOENT, + reason=str(e), + repourl=self._url, + request=f, + ) + self.__record_proto_error(ex) + errors.append(ex) + continue + except Exception as e: + ex = tx.TransportProtoError( + "file", + errno.EPROTO, + reason=str(e), + repourl=self._url, + request=f, + ) + self.__record_proto_error(ex) + errors.append(ex) + continue + return errors - self.__stats.record_tx() - self.__stats.record_error(decayable=ex.decayable) + def get_url(self): + """Returns the repo's url.""" - def add_version_data(self, verdict): - """Cache the information about what versions a repository - supports.""" + return self._url - self._verdata = verdict + def get_repouri_key(self): + """Returns the repo's RepositoryURI.""" - def get_status(self, header=None, ccancel=None): - """Get the archive status.""" + return self._repouri.key() - pubsinfo = {} - arcdata = { - "repository": { - "publishers": pubsinfo, - "version": self._arc.version, # Version of archive. - } - } + def get_versions(self, header=None, ccancel=None): + """Query the repo for versions information. + Returns a file-like object.""" - for pub in self._arc.get_publishers(): - try: - # Create temporary directory. - tmpdir = tempfile.mkdtemp(prefix="tmp.repo.") - # Get catalog. - self.get_catalog1(["catalog.attrs"], tmpdir, - pub=pub, header=header) - - cat = pkg.catalog.Catalog(meta_root=tmpdir) - pubinfo = {} - pubinfo["last-catalog-update"] = \ - pkg.catalog.datetime_to_basic_ts( - cat.last_modified) - pubinfo["package-count"] = cat.package_count - pubinfo["status"] = "online" - pubsinfo[pub.prefix] = { - "package-count": cat.package_count, - "last-catalog-update": - pkg.catalog.datetime_to_basic_ts( - cat.last_modified), - "status": "online", - } - finally: - # Remove temporary directory if possible. - shutil.rmtree(tmpdir, ignore_errors=True) - - buf = cStringIO() - try: - json.dump(arcdata, buf, ensure_ascii=False, indent=2, - sort_keys=True) - buf.write("\n") - except Exception as e: - reason = "Unable to retrieve status data:\n{0}".format(e) - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=reason, repourl=self._url) - self.__record_proto_error(ex) - raise ex - buf.seek(0) - return buf - - def get_catalog1(self, filelist, destloc, header=None, ts=None, - progtrack=None, pub=None, revalidate=False, redownload=False): - """Get the files that make up the catalog components - that are listed in 'filelist'. Download the files to - the directory specified in 'destloc'. The caller - may optionally specify a dictionary with header - elements in 'header'. If a conditional get is - to be performed, 'ts' should contain a floating point - value of seconds since the epoch. This protocol - doesn't implment revalidate and redownload. 
The options - are ignored.""" - - pub_prefix = getattr(pub, "prefix", None) - errors = [] - for f in filelist: - try: - self._arc.extract_catalog1(f, destloc, - pub=pub_prefix) - if progtrack: - fs = os.stat(os.path.join(destloc, f)) - progtrack.refresh_progress(pub, - fs.st_size) - except pkg.p5p.UnknownArchiveFiles as e: - ex = tx.TransportProtoError("file", - errno.ENOENT, reason=str(e), - repourl=self._url, request=f) - self.__record_proto_error(ex) - errors.append(ex) - continue - except Exception as e: - ex = tx.TransportProtoError("file", - errno.EPROTO, reason=str(e), - repourl=self._url, request=f) - self.__record_proto_error(ex) - errors.append(ex) - continue - return errors - - def get_datastream(self, fhash, version, header=None, ccancel=None, - pub=None): - """Get a datastream from a repo. The name of the file is given - in fhash.""" - - pub_prefix = getattr(pub, "prefix", None) - try: - return self._arc.get_package_file(fhash, - pub=pub_prefix) - except pkg.p5p.UnknownArchiveFiles as e: - ex = tx.TransportProtoError("file", errno.ENOENT, - reason=str(e), repourl=self._url, request=fhash) - self.__record_proto_error(ex) - raise ex - except Exception as e: - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=str(e), repourl=self._url, request=fhash) - self.__record_proto_error(ex) - raise ex - - def get_publisherinfo(self, header=None, ccancel=None): - """Get publisher information from the repository.""" + buf = cStringIO() + vops = { + "catalog": ["1"], + "file": ["0"], + "manifest": ["0"], + "publisher": ["0", "1"], + "versions": ["0"], + "status": ["0"], + } - try: - pubs = self._arc.get_publishers() - buf = cStringIO() - p5i.write(buf, pubs) - except Exception as e: - reason = "Unable to retrieve publisher configuration " \ - "data:\n{0}".format(e) - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=reason, repourl=self._url) - self.__record_proto_error(ex) - raise ex - buf.seek(0) - return buf - - def get_manifest(self, fmri, header=None, ccancel=None, pub=None): - """Get a manifest from repo. The fmri of the package for the - manifest is given in fmri.""" + buf.write("pkg-server {0}\n".format(pkg.VERSION)) + buf.write( + "\n".join( + "{0} {1}".format(op, " ".join(vers)) + for op, vers in six.iteritems(vops) + ) + + "\n" + ) + buf.seek(0) + self.__stats.record_tx() + return buf - try: - return self._arc.get_package_manifest(fmri, raw=True) - except pkg.p5p.UnknownPackageManifest as e: - ex = tx.TransportProtoError("file", errno.ENOENT, - reason=str(e), repourl=self._url, request=fmri) - self.__record_proto_error(ex) - raise ex - except Exception as e: - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=str(e), repourl=self._url, request=fmri) - self.__record_proto_error(ex) - raise ex - - def get_manifests(self, mfstlist, dest, progtrack=None, pub=None): - """Get manifests named in list. The mfstlist argument contains - tuples (fmri, header). This is so that each manifest may have - unique header information. 
The destination directory is spec- - ified in the dest argument.""" - - errors = [] - for fmri, h in mfstlist: - try: - self._arc.extract_package_manifest(fmri, dest, - filename=fmri.get_url_path()) - if progtrack: - fs = os.stat(os.path.join(dest, - fmri.get_url_path())) - progtrack.manifest_fetch_progress( - completion=True) - except pkg.p5p.UnknownPackageManifest as e: - ex = tx.TransportProtoError("file", - errno.ENOENT, reason=str(e), - repourl=self._url, request=fmri) - self.__record_proto_error(ex) - errors.append(ex) - continue - except Exception as e: - ex = tx.TransportProtoError("file", - errno.EPROTO, reason=str(e), - repourl=self._url, request=fmri) - self.__record_proto_error(ex) - errors.append(ex) - continue - return errors - - def get_files(self, filelist, dest, progtrack, version, header=None, pub=None): - """Get multiple files from the repo at once. - The files are named by hash and supplied in filelist. - If dest is specified, download to the destination - directory that is given. If progtrack is not None, - it contains a ProgressTracker object for the - downloads.""" - - pub_prefix = getattr(pub, "prefix", None) - errors = [] - for f in filelist: - try: - self._arc.extract_package_files([f], dest, - pub=pub_prefix) - if progtrack: - fs = os.stat(os.path.join(dest, f)) - progtrack.download_add_progress(1, - fs.st_size) - except pkg.p5p.UnknownArchiveFiles as e: - ex = tx.TransportProtoError("file", - errno.ENOENT, reason=str(e), - repourl=self._url, request=f) - self.__record_proto_error(ex) - errors.append(ex) - continue - except Exception as e: - ex = tx.TransportProtoError("file", - errno.EPROTO, reason=str(e), - repourl=self._url, request=f) - self.__record_proto_error(ex) - errors.append(ex) - continue - return errors - - def get_url(self): - """Returns the repo's url.""" - - return self._url - - def get_repouri_key(self): - """Returns the repo's RepositoryURI.""" - - return self._repouri.key() - - def get_versions(self, header=None, ccancel=None): - """Query the repo for versions information. - Returns a file-like object.""" - - buf = cStringIO() - vops = { - "catalog": ["1"], - "file": ["0"], - "manifest": ["0"], - "publisher": ["0", "1"], - "versions": ["0"], - "status": ["0"] - } + def has_version_data(self): + """Returns true if this repo knows its version information.""" - buf.write("pkg-server {0}\n".format(pkg.VERSION)) - buf.write("\n".join( - "{0} {1}".format(op, " ".join(vers)) - for op, vers in six.iteritems(vops) - ) + "\n") - buf.seek(0) - self.__stats.record_tx() - return buf + return self._verdata is not None - def has_version_data(self): - """Returns true if this repo knows its version information.""" + def supports_version(self, op, verlist): + """Returns version-id of highest supported version. + If the version is not supported, or no data is available, + -1 is returned instead.""" - return self._verdata is not None + if not self.has_version_data() or op not in self._verdata: + return -1 - def supports_version(self, op, verlist): - """Returns version-id of highest supported version. - If the version is not supported, or no data is available, - -1 is returned instead.""" + # This code assumes that both the verlist and verdata + # are sorted in reverse order. This behavior is currently + # implemented in the transport code. 
- if not self.has_version_data() or op not in self._verdata: - return -1 + for v in verlist: + if v in self._verdata[op]: + return v + return -1 - # This code assumes that both the verlist and verdata - # are sorted in reverse order. This behavior is currently - # implemented in the transport code. + def touch_manifest(self, mfst, header=None, ccancel=None, pub=None): + """No-op.""" + return True - for v in verlist: - if v in self._verdata[op]: - return v - return -1 + def build_refetch_header(self, header): + """Pointless to attempt refetch of corrupt content for + this protocol.""" + return header - def touch_manifest(self, mfst, header=None, ccancel=None, pub=None): - """No-op.""" - return True - def build_refetch_header(self, header): - """Pointless to attempt refetch of corrupt content for - this protocol.""" - return header +class FileRepo(object): + """Factory class for creating transport repository objects for + filesystem-based repository sources. + """ + def __new__(cls, repostats, repouri, engine, frepo=None): + """Returns a new transport repository object based on the + provided information. -class FileRepo(object): - """Factory class for creating transport repository objects for - filesystem-based repository sources. - """ + 'repostats' is a RepoStats object. - def __new__(cls, repostats, repouri, engine, frepo=None): - """Returns a new transport repository object based on the - provided information. + 'repouri' is a TransportRepoURI object. - 'repostats' is a RepoStats object. + 'engine' is a transport engine object. - 'repouri' is a TransportRepoURI object. + 'frepo' is an optional Repository object to use instead + of creating one. - 'engine' is a transport engine object. + The convenience function new_repo() can be used to create + the correct repo.""" - 'frepo' is an optional Repository object to use instead - of creating one. + try: + scheme, netloc, path, params, query, fragment = urlparse( + repouri.uri, "file", allow_fragments=0 + ) + path = url2pathname(path) + except Exception as e: + ex = tx.TransportProtoError( + "file", errno.EPROTO, reason=str(e), repourl=repostats.url + ) + repostats.record_tx() + repostats.record_error(decayable=ex.decayable) + raise ex - The convenience function new_repo() can be used to create - the correct repo.""" + # Path must be rstripped of separators for this check to + # succeed. + if not frepo and os.path.isfile(path.rstrip(os.path.sep)): + # Assume target is a repository archive. + return _ArchiveRepo(repostats, repouri, engine) - try: - scheme, netloc, path, params, query, fragment = \ - urlparse(repouri.uri, "file", - allow_fragments=0) - path = url2pathname(path) - except Exception as e: - ex = tx.TransportProtoError("file", errno.EPROTO, - reason=str(e), repourl=repostats.url) - repostats.record_tx() - repostats.record_error(decayable=ex.decayable) - raise ex - - # Path must be rstripped of separators for this check to - # succeed. - if not frepo and os.path.isfile(path.rstrip(os.path.sep)): - # Assume target is a repository archive. - return _ArchiveRepo(repostats, repouri, engine) - - # Assume target is a filesystem repository. - return _FilesystemRepo(repostats, repouri, engine, frepo=frepo) + # Assume target is a filesystem repository. + return _FilesystemRepo(repostats, repouri, engine, frepo=frepo) # ProgressCallback objects that bridge the interfaces between ProgressTracker, # and the necessary callbacks for the TransportEngine. 
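The FileRepo factory above reduces to one filesystem test: a file:// URI that names a regular file is assumed to be a package archive and handled by _ArchiveRepo, while anything else is treated as an on-disk repository and handled by _FilesystemRepo. A standalone sketch of that dispatch rule, assuming Python 3 urllib (the function name and return labels are illustrative, not part of the module):

import os
from urllib.parse import urlparse
from urllib.request import url2pathname

def classify_file_source(uri):
    """Mirror of the check in FileRepo.__new__: strip trailing path
    separators, then test whether the target is a plain file."""
    path = url2pathname(urlparse(uri, "file", allow_fragments=False).path)
    if os.path.isfile(path.rstrip(os.path.sep)):
        return "archive"        # would become _ArchiveRepo
    return "filesystem"         # would become _FilesystemRepo

# e.g. classify_file_source("file:///tmp/repo.p5p") -> "archive"
#      classify_file_source("file:///tmp/repo/")    -> "filesystem"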
+ class ProgressCallback(object): - """This class bridges the interfaces between a ProgressTracker - object and the progress callback that's provided by Pycurl. - Since progress callbacks are per curl handle, and handles aren't - guaranteed to succeed, this object watches a handle's progress - and updates the tracker accordingly.""" + """This class bridges the interfaces between a ProgressTracker + object and the progress callback that's provided by Pycurl. + Since progress callbacks are per curl handle, and handles aren't + guaranteed to succeed, this object watches a handle's progress + and updates the tracker accordingly.""" + + def __init__(self, progtrack): + self.progtrack = progtrack - def __init__(self, progtrack): - self.progtrack = progtrack + def abort(self): + """Download failed.""" + pass - def abort(self): - """Download failed.""" - pass + def commit(self, size): + """This download has succeeded. The size argument is + the total size that the client received.""" + pass - def commit(self, size): - """This download has succeeded. The size argument is - the total size that the client received.""" - pass + def progress_callback(self, dltot, dlcur, ultot, ulcur): + """Called by pycurl/libcurl framework to update + progress tracking.""" - def progress_callback(self, dltot, dlcur, ultot, ulcur): - """Called by pycurl/libcurl framework to update - progress tracking.""" + if ( + hasattr(self.progtrack, "check_cancelation") + and self.progtrack.check_cancelation() + ): + return -1 - if hasattr(self.progtrack, "check_cancelation") and \ - self.progtrack.check_cancelation(): - return -1 + return 0 - return 0 class CatalogProgress(ProgressCallback): - """This class bridges the interfaces between a ProgressTracker's - refresh code and the progress callback for that's provided by Pycurl.""" + """This class bridges the interfaces between a ProgressTracker's + refresh code and the progress callback for that's provided by Pycurl.""" - def __init__(self, progtrack): - ProgressCallback.__init__(self, progtrack) - self.dltotal = 0 - self.dlcurrent = 0 - self.completed = False + def __init__(self, progtrack): + ProgressCallback.__init__(self, progtrack) + self.dltotal = 0 + self.dlcurrent = 0 + self.completed = False - def abort(self): - """Download failed. Remove the amount of bytes downloaded - by this file from the ProgressTracker.""" + def abort(self): + """Download failed. Remove the amount of bytes downloaded + by this file from the ProgressTracker.""" - self.progtrack.refresh_progress(None, -self.dlcurrent) - self.completed = True + self.progtrack.refresh_progress(None, -self.dlcurrent) + self.completed = True - def commit(self, size): - # - # This callback is not interesting to us because - # catalogs are stored uncompressed-- and size is the - # size of the resultant object on disk, not the total - # xfer size. - # - pass + def commit(self, size): + # + # This callback is not interesting to us because + # catalogs are stored uncompressed-- and size is the + # size of the resultant object on disk, not the total + # xfer size. 
+ # + pass + def progress_callback(self, dltot, dlcur, ultot, ulcur): + """Called by pycurl/libcurl framework to update + progress tracking.""" - def progress_callback(self, dltot, dlcur, ultot, ulcur): - """Called by pycurl/libcurl framework to update - progress tracking.""" + if ( + hasattr(self.progtrack, "check_cancelation") + and self.progtrack.check_cancelation() + ): + return -1 - if hasattr(self.progtrack, "check_cancelation") and \ - self.progtrack.check_cancelation(): - return -1 + if self.dltotal != dltot: + self.dltotal = dltot - if self.dltotal != dltot: - self.dltotal = dltot + new_progress = int(dlcur - self.dlcurrent) + if new_progress > 0: + self.dlcurrent += new_progress + self.progtrack.refresh_progress(None, new_progress) - new_progress = int(dlcur - self.dlcurrent) - if new_progress > 0: - self.dlcurrent += new_progress - self.progtrack.refresh_progress(None, new_progress) + return 0 - return 0 class ManifestProgress(ProgressCallback): - """This class bridges the interfaces between a ProgressTracker's - manifest fetching code and the progress callback for that's provided by - Pycurl.""" + """This class bridges the interfaces between a ProgressTracker's + manifest fetching code and the progress callback for that's provided by + Pycurl.""" - def abort(self): - """Download failed. Remove the amount of bytes downloaded - by this file from the ProgressTracker.""" - pass + def abort(self): + """Download failed. Remove the amount of bytes downloaded + by this file from the ProgressTracker.""" + pass - def commit(self, size): - """Indicate that this download has succeeded.""" - self.progtrack.manifest_fetch_progress(completion=True) + def commit(self, size): + """Indicate that this download has succeeded.""" + self.progtrack.manifest_fetch_progress(completion=True) - def progress_callback(self, dltot, dlcur, ultot, ulcur): - """Called by pycurl/libcurl framework to update - progress tracking.""" + def progress_callback(self, dltot, dlcur, ultot, ulcur): + """Called by pycurl/libcurl framework to update + progress tracking.""" + + if ( + hasattr(self.progtrack, "check_cancelation") + and self.progtrack.check_cancelation() + ): + return -1 + self.progtrack.manifest_fetch_progress(completion=False) + return 0 - if hasattr(self.progtrack, "check_cancelation") and \ - self.progtrack.check_cancelation(): - return -1 - self.progtrack.manifest_fetch_progress(completion=False) - return 0 class FileProgress(ProgressCallback): - """This class bridges the interfaces between a ProgressTracker - object and the progress callback that's provided by Pycurl. - Since progress callbacks are per curl handle, and handles aren't - guaranteed to succeed, this object watches a handle's progress - and updates the tracker accordingly. If the handle fails, - it will correctly remove the bytes from the file. The curl - callback reports bytes even when it doesn't make progress. - It's necessary to keep additonal state here, since the client's - ProgressTracker has global counts of the bytes. If we're - unable to keep a per-file count, the numbers will get - lost quickly.""" - - def __init__(self, progtrack): - ProgressCallback.__init__(self, progtrack) - self.dltotal = 0 - self.dlcurrent = 0 - self.ultotal = 0 - self.ulcurrent = 0 - self.completed = False - - def abort(self): - """Download failed. 
Remove the amount of bytes downloaded - by this file from the ProgressTracker.""" - - self.progtrack.download_add_progress(0, -self.dlcurrent) - self.progtrack.upload_add_progress(-self.ulcurrent) - self.completed = True - - def commit(self, size): - """Indicate that this download has succeeded. The size - argument is the total size that we received. Compare this - value against the dlcurrent. If it's out of sync, which - can happen if the underlying framework swaps our request - across connections, adjust the progress tracker by the - amount we're off.""" - - adjustment = int(size - self.dlcurrent) - - self.progtrack.download_add_progress(1, adjustment) - self.completed = True - - def progress_callback(self, dltot, dlcur, ultot, ulcur): - """Called by pycurl/libcurl framework to update - progress tracking.""" - - if hasattr(self.progtrack, "check_cancelation") and \ - self.progtrack.check_cancelation(): - return -1 - - if self.completed: - return 0 - - if self.dltotal != dltot: - self.dltotal = dltot - - new_progress = int(dlcur - self.dlcurrent) - if new_progress > 0: - self.dlcurrent += new_progress - self.progtrack.download_add_progress(0, new_progress) - - if self.ultotal != ultot: - self.ultotal = ultot - - new_progress = int(ulcur - self.ulcurrent) - if new_progress > 0: - self.ulcurrent += new_progress - self.progtrack.upload_add_progress(new_progress) - - return 0 + """This class bridges the interfaces between a ProgressTracker + object and the progress callback that's provided by Pycurl. + Since progress callbacks are per curl handle, and handles aren't + guaranteed to succeed, this object watches a handle's progress + and updates the tracker accordingly. If the handle fails, + it will correctly remove the bytes from the file. The curl + callback reports bytes even when it doesn't make progress. + It's necessary to keep additonal state here, since the client's + ProgressTracker has global counts of the bytes. If we're + unable to keep a per-file count, the numbers will get + lost quickly.""" + + def __init__(self, progtrack): + ProgressCallback.__init__(self, progtrack) + self.dltotal = 0 + self.dlcurrent = 0 + self.ultotal = 0 + self.ulcurrent = 0 + self.completed = False + + def abort(self): + """Download failed. Remove the amount of bytes downloaded + by this file from the ProgressTracker.""" + + self.progtrack.download_add_progress(0, -self.dlcurrent) + self.progtrack.upload_add_progress(-self.ulcurrent) + self.completed = True + + def commit(self, size): + """Indicate that this download has succeeded. The size + argument is the total size that we received. Compare this + value against the dlcurrent. 
If it's out of sync, which + can happen if the underlying framework swaps our request + across connections, adjust the progress tracker by the + amount we're off.""" + + adjustment = int(size - self.dlcurrent) + + self.progtrack.download_add_progress(1, adjustment) + self.completed = True + + def progress_callback(self, dltot, dlcur, ultot, ulcur): + """Called by pycurl/libcurl framework to update + progress tracking.""" + + if ( + hasattr(self.progtrack, "check_cancelation") + and self.progtrack.check_cancelation() + ): + return -1 + + if self.completed: + return 0 + + if self.dltotal != dltot: + self.dltotal = dltot + + new_progress = int(dlcur - self.dlcurrent) + if new_progress > 0: + self.dlcurrent += new_progress + self.progtrack.download_add_progress(0, new_progress) + + if self.ultotal != ultot: + self.ultotal = ultot + + new_progress = int(ulcur - self.ulcurrent) + if new_progress > 0: + self.ulcurrent += new_progress + self.progtrack.upload_add_progress(new_progress) + + return 0 # cache transport repo objects, so one isn't created on every operation + class RepoCache(object): - """An Object that caches repository objects. Used to make - sure that repos are re-used instead of re-created for each - operation. The objects are keyed by TransportRepoURI.key() - objects.""" - - # Schemes supported by the cache. - supported_schemes = { - "file": FileRepo, - "http": HTTPRepo, - "https": HTTPSRepo, - } + """An Object that caches repository objects. Used to make + sure that repos are re-used instead of re-created for each + operation. The objects are keyed by TransportRepoURI.key() + objects.""" - update_schemes = { - "file": FileRepo - } + # Schemes supported by the cache. + supported_schemes = { + "file": FileRepo, + "http": HTTPRepo, + "https": HTTPSRepo, + } + + update_schemes = {"file": FileRepo} + + def __init__(self, engine): + """Caller must include a TransportEngine.""" - def __init__(self, engine): - """Caller must include a TransportEngine.""" + self.__engine = engine + self.__cache = {} - self.__engine = engine - self.__cache = {} + def __contains__(self, repouri): + return repouri.key() in self.__cache - def __contains__(self, repouri): - return repouri.key() in self.__cache + def clear_cache(self): + """Flush the contents of the cache.""" - def clear_cache(self): - """Flush the contents of the cache.""" + self.__cache = {} - self.__cache = {} + def new_repo(self, repostats, repouri): + """Create a new repo server for the given repouri object.""" - def new_repo(self, repostats, repouri): - """Create a new repo server for the given repouri object.""" + scheme = repouri.scheme - scheme = repouri.scheme + if scheme not in RepoCache.supported_schemes: + raise tx.TransportOperationError( + "Scheme {0} not" " supported by transport.".format(scheme) + ) - if scheme not in RepoCache.supported_schemes: - raise tx.TransportOperationError("Scheme {0} not" - " supported by transport.".format(scheme)) + if repouri.key() in self.__cache: + return self.__cache[repouri.key()] - if repouri.key() in self.__cache: - return self.__cache[repouri.key()] + repo = RepoCache.supported_schemes[scheme]( + repostats, repouri, self.__engine + ) - repo = RepoCache.supported_schemes[scheme](repostats, repouri, - self.__engine) + self.__cache[repouri.key()] = repo + return repo - self.__cache[repouri.key()] = repo - return repo + def update_repo(self, repostats, repouri, repository): + """For the FileRepo, some callers need to update its + Repository object. They should use this method to do so. 
+ If the Repo isn't in the cache, it's created and added.""" - def update_repo(self, repostats, repouri, repository): - """For the FileRepo, some callers need to update its - Repository object. They should use this method to do so. - If the Repo isn't in the cache, it's created and added.""" + scheme = repouri.scheme - scheme = repouri.scheme + if scheme not in RepoCache.update_schemes: + return - if scheme not in RepoCache.update_schemes: - return + if repouri.key() in self.__cache: + repo = self.__cache[repouri.key()] + repo._frepo = repository + return - if repouri.key() in self.__cache: - repo = self.__cache[repouri.key()] - repo._frepo = repository - return + repo = RepoCache.update_schemes[scheme]( + repostats, repouri, self.__engine, frepo=repository + ) - repo = RepoCache.update_schemes[scheme](repostats, repouri, - self.__engine, frepo=repository) + self.__cache[repouri.key()] = repo - self.__cache[repouri.key()] = repo + def remove_repo(self, repo=None, url=None): + """Remove a repo from the cache. Caller must supply + either a TransportRepoURI object or a URL.""" + self.contents() - def remove_repo(self, repo=None, url=None): - """Remove a repo from the cache. Caller must supply - either a TransportRepoURI object or a URL.""" - self.contents() + if repo: + repouri = repo + if url: + repouri = TransportRepoURI(url) + else: + raise ValueError("Must supply either a repo or a uri.") - if repo: - repouri = repo - if url: - repouri = TransportRepoURI(url) - else: - raise ValueError("Must supply either a repo or a uri.") + if repouri.key() in self.__cache: + del self.__cache[repouri.key()] - if repouri.key() in self.__cache: - del self.__cache[repouri.key()] # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/transport/stats.py b/src/modules/client/transport/stats.py index bcdb1d504..7d154f217 100644 --- a/src/modules/client/transport/stats.py +++ b/src/modules/client/transport/stats.py @@ -34,517 +34,547 @@ class RepoChooser(object): - """An object that contains repo statistics. It applies algorithms - to choose an optimal set of repos for a given publisher, based - upon the observed repo statistics. - - The RepoChooser object is a container for RepoStats objects. - It's used to return the RepoStats in an ordered list, which - helps the transport pick the best performing destination.""" - - def __init__(self): - # A dictionary containing the RepoStats objects. The dictionary - # uses TransportRepoURI.key() values as its key. - self.__rsobj = {} - - def __getitem__(self, key): - return self.__rsobj[key] - - def __contains__(self, key): - return key in self.__rsobj - - def __get_proxy(self, ds): - """Gets the proxy that was used at runtime for a given - RepoStats object. This may differ from the persistent - configuration of any given TransportRepoURI. 
We do not use the - ds.runtime_proxy value, since that may include cleartext - passwords.""" - - # we don't allow the proxy used for the system publisher to be - # overridden - if ds.system: - if not ds.proxy: - return "-" - return ds.proxy - - proxy = misc.get_runtime_proxy(ds.proxy, ds.url) - if not proxy: - return "-" - return proxy - - def dump(self): - """Write the repo statistics to stdout.""" - - hfmt = "{0:41.41} {1:30} {2:6} {3:4} {4:4} {5:8} {6:10} {7:5} {8:7} {9:4}" - dfmt = "{0:41.41} {1:30} {2:6} {3:4} {4:4} {5:8} {6:10} {7:5} {8:6f} {9:4}" - misc.msg(hfmt.format("URL", "Proxy", "Good", "Err", "Conn", - "Speed", "Size", "Used", "CSpeed", "Qual")) - - for ds in self.__rsobj.values(): - - speedstr = misc.bytes_to_str(ds.transfer_speed, - "{num:>.0f} {unit}/s") - - sizestr = misc.bytes_to_str(ds.bytes_xfr) - proxy = self.__get_proxy(ds) - misc.msg(dfmt.format(ds.url, proxy, ds.success, - ds.failures, ds.num_connect, speedstr, sizestr, - ds.used, ds.connect_time, ds.quality)) - - def get_num_visited(self, repouri_list): - """Walk a list of TransportRepoURIs and return the number - that have been visited as an integer. If a repository - is in the list, but we don't know about it yet, create a - stats object to keep track of it, and include it in - the visited count.""" - - found_rs = [] - - for ruri in repouri_list: - key = ruri.key() - if key in self.__rsobj: - rs = self.__rsobj[key] - else: - rs = RepoStats(ruri) - self.__rsobj[key] = rs - found_rs.append((rs, ruri)) - - return len([x for x in found_rs if x[0].used]) - - def get_repostats(self, repouri_list, origin_list=misc.EmptyI): - """Walk a list of TransportRepoURIs and return a sorted list of - status objects. The better choices should be at the - beginning of the list.""" - - found_rs = [] - origin_speed = 0 - origin_count = 0 - origin_avg_speed = 0 - origin_cspeed = 0 - origin_ccount = 0 - origin_avg_cspeed = 0 - - for ouri in origin_list: - key = ouri.key() - if key in self.__rsobj: - rs = self.__rsobj[key] - if rs.bytes_xfr > 0: - # Exclude sources that don't - # contribute to transfer speed. - origin_speed += rs.transfer_speed - origin_count += 1 - if rs.connect_time > 0: - # Exclude sources that don't - # contribute to connection - # time. - origin_cspeed += rs.connect_time - origin_ccount += 1 - else: - rs = RepoStats(ouri) - self.__rsobj[key] = rs - - if origin_count > 0: - origin_avg_speed = origin_speed // origin_count - if origin_ccount > 0: - origin_avg_cspeed = origin_cspeed // origin_ccount - - # Walk the list of repouris that we were provided. - # If they're already in the dictionary, copy a reference - # into the found_rs list, otherwise create the object - # and then add it to our list of found objects. - num_origins = len(origin_list) - num_mirrors = len(repouri_list) - len(origin_list) - o_idx = 0 - m_idx = 0 - for ruri in repouri_list: - key = ruri.key() - if key in self.__rsobj: - rs = self.__rsobj[key] - else: - rs = RepoStats(ruri) - self.__rsobj[key] = rs - found_rs.append((rs, ruri)) - if ruri in origin_list: - n = num_origins - o_idx - # old-division; pylint: disable=W1619 - rs.origin_factor = n / num_origins - o_idx += 1 - else: - n = num_mirrors - m_idx - # old-division; pylint: disable=W1619 - rs.origin_factor = n / num_mirrors - m_idx += 1 - if origin_count > 0: - rs.origin_speed = origin_avg_speed - rs.origin_count = origin_count - if origin_ccount > 0: - rs.origin_cspeed = origin_avg_cspeed - - # Decay error rate for transient errors. - # Reduce the error penalty by .1% each iteration. 
- # In other words, keep 99.9% of the current value. - rs._err_decay *= 0.999 - # Decay origin bonus each iteration to gradually give - # up slow and erroneous origins. - rs.origin_decay *= 0.95 - - found_rs.sort(key=lambda x: x[0].quality, reverse=True) - - # list of tuples, (repostatus, repouri) - return found_rs - - def clear(self): - """Clear all statistics count.""" - - self.__rsobj = {} - - def reset(self): - """reset each stats object""" - - for v in self.__rsobj.values(): - v.reset() - - -class RepoStats(object): - """An object for keeping track of observed statistics for a particular - TransportRepoURI. This includes things like observed performance, - availability, successful and unsuccessful transaction rates, etc. - - There's one RepoStats object per transport destination. - This allows the transport to keep statistics about each - host that it visits.""" - - def __init__(self, repouri): - """Initialize a RepoStats object. Pass a TransportRepoURI - object in repouri to configure an object for a particular - repository URI.""" - - self.__url = repouri.uri.rstrip("/") - self.__scheme = urlsplit(self.__url)[0] - self.__priority = repouri.priority - - self.__proxy = repouri.proxy - self.__system = repouri.system - - self._err_decay = 0 - self.__failed_tx = 0 - self.__content_err = 0 - self.__decayable_err = 0 - self.__timeout_err = 0 - self.__total_tx = 0 - self.__consecutive_errors = 0 - - self.__connections = 0 - self.__connect_time = 0.0 - - self.__used = False - - self.__bytes_xfr = 0.0 - self.__seconds_xfr = 0.0 - self.origin_speed = 0.0 - self.origin_cspeed = 0.0 - self.origin_count = 1 - self.origin_factor = 1 - self.origin_decay = 1 - - def clear_consecutive_errors(self): - """Set the count of consecutive errors to zero. This is - done once we know a transaction has been successfully - completed.""" - - self.__consecutive_errors = 0 - - def record_connection(self, time): - """Record amount of time spent connecting.""" - - if not self.__used: - self.__used = True - - self.__connections += 1 - self.__connect_time += time - - def record_error(self, decayable=False, content=False, timeout=False): - """Record that an operation to the TransportRepoURI represented - by this RepoStats object failed with an error. - - Set decayable to true if the error is a transient - error that may be decayed by the stats framework. - - Set content to true if the error is caused by - corrupted or invalid content.""" - - if not self.__used: - self.__used = True - - self.__consecutive_errors += 1 - if decayable: - self.__decayable_err += 1 - self._err_decay += 1 - elif content: - self.__content_err += 1 - else: - self.__failed_tx += 1 - # A timeout may be decayable or not, so track it in addition - # to the other classes of errors. - if timeout: - self.__timeout_err += 1 - - - def record_progress(self, bytes, seconds): - """Record time and size of a network operation to a - particular TransportRepoURI, represented by the RepoStats - object. - Place the number of bytes transferred in the bytes argument. 
- The time, in seconds, should be supplied in the - seconds argument.""" - - if not self.__used: - self.__used = True - self.__bytes_xfr += bytes - self.__seconds_xfr += seconds - - def record_tx(self): - """Record that an operation to the URI represented - by this RepoStats object was initiated.""" - - if not self.__used: - self.__used = True - self.__total_tx += 1 - - def reset(self): - """Reset transport stats in preparation for next operation.""" - - # The connection stats (such as number, cspeed, time) are not - # reset because the metadata bandwidth calculation would be - # skewed when picking a host that gives us fast data. In that - # case, keeping track of the latency helps quality make a - # better choice. - self.__bytes_xfr = 0.0 - self.__seconds_xfr = 0.0 - self.__failed_tx = 0 - self.__content_err = 0 - self.__decayable_err = 0 - self._err_decay = 0 - self.__total_tx = 0 - self.__consecutive_errors = 0 - self.origin_speed = 0.0 - - @property - def bytes_xfr(self): - """Return the number of bytes transferred.""" - - return self.__bytes_xfr - - @property - def connect_time(self): - """The average connection time for this host.""" - - if self.__connections == 0: - if self.__used and self.__timeout_err > 0: - return 1.0 - else: - return 0.0 - + """An object that contains repo statistics. It applies algorithms + to choose an optimal set of repos for a given publisher, based + upon the observed repo statistics. + + The RepoChooser object is a container for RepoStats objects. + It's used to return the RepoStats in an ordered list, which + helps the transport pick the best performing destination.""" + + def __init__(self): + # A dictionary containing the RepoStats objects. The dictionary + # uses TransportRepoURI.key() values as its key. + self.__rsobj = {} + + def __getitem__(self, key): + return self.__rsobj[key] + + def __contains__(self, key): + return key in self.__rsobj + + def __get_proxy(self, ds): + """Gets the proxy that was used at runtime for a given + RepoStats object. This may differ from the persistent + configuration of any given TransportRepoURI. We do not use the + ds.runtime_proxy value, since that may include cleartext + passwords.""" + + # we don't allow the proxy used for the system publisher to be + # overridden + if ds.system: + if not ds.proxy: + return "-" + return ds.proxy + + proxy = misc.get_runtime_proxy(ds.proxy, ds.url) + if not proxy: + return "-" + return proxy + + def dump(self): + """Write the repo statistics to stdout.""" + + hfmt = ( + "{0:41.41} {1:30} {2:6} {3:4} {4:4} {5:8} {6:10} {7:5} {8:7} {9:4}" + ) + dfmt = ( + "{0:41.41} {1:30} {2:6} {3:4} {4:4} {5:8} {6:10} {7:5} {8:6f} {9:4}" + ) + misc.msg( + hfmt.format( + "URL", + "Proxy", + "Good", + "Err", + "Conn", + "Speed", + "Size", + "Used", + "CSpeed", + "Qual", + ) + ) + + for ds in self.__rsobj.values(): + speedstr = misc.bytes_to_str( + ds.transfer_speed, "{num:>.0f} {unit}/s" + ) + + sizestr = misc.bytes_to_str(ds.bytes_xfr) + proxy = self.__get_proxy(ds) + misc.msg( + dfmt.format( + ds.url, + proxy, + ds.success, + ds.failures, + ds.num_connect, + speedstr, + sizestr, + ds.used, + ds.connect_time, + ds.quality, + ) + ) + + def get_num_visited(self, repouri_list): + """Walk a list of TransportRepoURIs and return the number + that have been visited as an integer. 
If a repository + is in the list, but we don't know about it yet, create a + stats object to keep track of it, and include it in + the visited count.""" + + found_rs = [] + + for ruri in repouri_list: + key = ruri.key() + if key in self.__rsobj: + rs = self.__rsobj[key] + else: + rs = RepoStats(ruri) + self.__rsobj[key] = rs + found_rs.append((rs, ruri)) + + return len([x for x in found_rs if x[0].used]) + + def get_repostats(self, repouri_list, origin_list=misc.EmptyI): + """Walk a list of TransportRepoURIs and return a sorted list of + status objects. The better choices should be at the + beginning of the list.""" + + found_rs = [] + origin_speed = 0 + origin_count = 0 + origin_avg_speed = 0 + origin_cspeed = 0 + origin_ccount = 0 + origin_avg_cspeed = 0 + + for ouri in origin_list: + key = ouri.key() + if key in self.__rsobj: + rs = self.__rsobj[key] + if rs.bytes_xfr > 0: + # Exclude sources that don't + # contribute to transfer speed. + origin_speed += rs.transfer_speed + origin_count += 1 + if rs.connect_time > 0: + # Exclude sources that don't + # contribute to connection + # time. + origin_cspeed += rs.connect_time + origin_ccount += 1 + else: + rs = RepoStats(ouri) + self.__rsobj[key] = rs + + if origin_count > 0: + origin_avg_speed = origin_speed // origin_count + if origin_ccount > 0: + origin_avg_cspeed = origin_cspeed // origin_ccount + + # Walk the list of repouris that we were provided. + # If they're already in the dictionary, copy a reference + # into the found_rs list, otherwise create the object + # and then add it to our list of found objects. + num_origins = len(origin_list) + num_mirrors = len(repouri_list) - len(origin_list) + o_idx = 0 + m_idx = 0 + for ruri in repouri_list: + key = ruri.key() + if key in self.__rsobj: + rs = self.__rsobj[key] + else: + rs = RepoStats(ruri) + self.__rsobj[key] = rs + found_rs.append((rs, ruri)) + if ruri in origin_list: + n = num_origins - o_idx # old-division; pylint: disable=W1619 - return self.__connect_time / self.__connections - - @property - def consecutive_errors(self): - """Return the number of successive errors this endpoint - has encountered.""" - - return self.__consecutive_errors - - @property - def failures(self): - """Return the number of failures that the client has encountered - while trying to perform operations on this repository.""" - - return self.__failed_tx + self.__content_err + \ - self.__decayable_err - - @property - def content_errors(self): - """Return the number of content errors that the client has - encountered while trying to perform operation on this - repository.""" - - return self.__content_err - - @property - def num_connect(self): - """Return the number of times that the host has had a - connection established. This is less than or equal to the - number of transactions.""" - - return self.__connections - - @property - def priority(self): - """Return the priority of the URI, if one is assigned.""" - - if self.__priority is None: - return 0 - - return self.__priority - - @property - def scheme(self): - """Return the scheme of the RepoURI. (e.g. http, file.)""" - - return self.__scheme - - @property - def quality(self): - """Return the quality, as an integer value, of the - repository. A higher value means better quality. - - This particular implementation of quality() contains - a random term. 
Two successive calls to this function - may return different values.""" - - Nused = 20 - Cused = 10 - - Cspeed = 100 - Cconn_speed = 66 - Cerror = 500 - Ccontent_err = 1000 - Crand_max = 20 - Cospeed_none = 100000 - Cocspeed_none = 1 - - if self.origin_speed > 0: - ospeed = self.origin_speed - else: - ospeed = Cospeed_none - - if self.origin_cspeed > 0: - ocspeed = self.origin_cspeed - else: - ocspeed = Cocspeed_none - - # This function applies a bonus to hosts that have little or - # no usage. It started out life as a Heaviside step function, - # but it has since been adjusted so that it scales back the - # bonus as the host approaches the limit where the bonus - # is applied. Hosts with no use recieve the largest bonus, - # while hosts at <= Nused transactions receive the none. - def unused_bonus(self): - tx = 0 - - tx = self.__total_tx - - if tx < 0: - return 0 - - if tx < Nused: - return Cused * (Nused - tx)**2 - - return 0 - - def origin_order_bonus(self): - b = Cspeed * (self.origin_count) ** 2 + Nused ** 2 * \ - Cused - return self.origin_factor * b * self.origin_decay - - - # - # Quality function: - # - # This function presents the quality of a repository as an - # integer value. The quality is determined by observing - # different aspects of the repository's performance. This - # includes how often it has been used, the transfer speed, the - # connect speed, and the number of errors classified by type. - # - # The equation is currently defined as: - # - # Q = Origin_order_bonus() + Unused_bonus() + Cspeed * - # ((bytes/.001+seconds) / origin_speed)^2 + random_bonus( - # Crand_max) - Cconn_speed * (connect_speed / - # origin_connect_speed)^2 - Ccontent_error * (content_errors)^2 - # - Cerror * (non_decayable_errors + value_of_decayed_errors)^2 - # - # Unused_bonus = Cused * (MaxUsed - total tx)^2 if total_tx - # is less than MaxUsed, otherwise return 0. - # - # random_bonus is a gaussian distribution where random_max is - # set as the argument for the stddev. Most numbers generated - # will fall between 0 and -/+ random_max, but some will fall - # outside of the first standard deviation. - # - # The constants were derived by live testing, and using - # a simulated environment. - # + rs.origin_factor = n / num_origins + o_idx += 1 + else: + n = num_mirrors - m_idx # old-division; pylint: disable=W1619 - q = origin_order_bonus(self) + unused_bonus(self) + \ - (Cspeed * ((self.__bytes_xfr / (.001 + self.__seconds_xfr)) - / ospeed)**2) + \ - int(random.gauss(0, Crand_max)) - \ - (Cconn_speed * (self.connect_time / ocspeed)**2) - \ - (Ccontent_err * (self.__content_err)**2) - \ - (Cerror * (self.__failed_tx + self._err_decay)**2) - return int(q) + rs.origin_factor = n / num_mirrors + m_idx += 1 + if origin_count > 0: + rs.origin_speed = origin_avg_speed + rs.origin_count = origin_count + if origin_ccount > 0: + rs.origin_cspeed = origin_avg_cspeed - @property - def seconds_xfr(self): - """Return the total amount of time elapsed while performing - operations against this host.""" + # Decay error rate for transient errors. + # Reduce the error penalty by .1% each iteration. + # In other words, keep 99.9% of the current value. + rs._err_decay *= 0.999 + # Decay origin bonus each iteration to gradually give + # up slow and erroneous origins. 
+ rs.origin_decay *= 0.95 - return self.__seconds_xfr + found_rs.sort(key=lambda x: x[0].quality, reverse=True) - @property - def success(self): - """Return the number of successful transaction that this client - has performed while communicating with this repository.""" + # list of tuples, (repostatus, repouri) + return found_rs - return self.__total_tx - (self.__failed_tx + - self.__content_err + self.__decayable_err) + def clear(self): + """Clear all statistics count.""" + self.__rsobj = {} - @property - def transfer_speed(self): - """Return the average transfer speed in bytes/sec for - operations against this uri.""" + def reset(self): + """reset each stats object""" - if self.__seconds_xfr == 0: - return 0.0 + for v in self.__rsobj.values(): + v.reset() - # old-division; pylint: disable=W1619 - return self.__bytes_xfr / self.__seconds_xfr - @property - def url(self): - """Return the URL that identifies the repository that we're - keeping statistics about.""" - - return self.__url - - @property - def proxy(self): - """Return the default proxy being used to contact the repository - that we're keeping statistics about. Note that OS - environment variables, "http_proxy", "https_proxy", "all_proxy" - and "no_proxy" values will override this value in - pkg.client.transport.engine.""" - - return self.__proxy - - @property - def system(self): - """Return whether these statistics are being used to track the - system publisher, in which case, we always use the proxy - provided rather than proxy environment variables.""" - - return self.__system - - @property - def used(self): - """A boolean value that indicates whether the URI - has been used for network operations.""" +class RepoStats(object): + """An object for keeping track of observed statistics for a particular + TransportRepoURI. This includes things like observed performance, + availability, successful and unsuccessful transaction rates, etc. + + There's one RepoStats object per transport destination. + This allows the transport to keep statistics about each + host that it visits.""" + + def __init__(self, repouri): + """Initialize a RepoStats object. Pass a TransportRepoURI + object in repouri to configure an object for a particular + repository URI.""" + + self.__url = repouri.uri.rstrip("/") + self.__scheme = urlsplit(self.__url)[0] + self.__priority = repouri.priority + + self.__proxy = repouri.proxy + self.__system = repouri.system + + self._err_decay = 0 + self.__failed_tx = 0 + self.__content_err = 0 + self.__decayable_err = 0 + self.__timeout_err = 0 + self.__total_tx = 0 + self.__consecutive_errors = 0 + + self.__connections = 0 + self.__connect_time = 0.0 + + self.__used = False + + self.__bytes_xfr = 0.0 + self.__seconds_xfr = 0.0 + self.origin_speed = 0.0 + self.origin_cspeed = 0.0 + self.origin_count = 1 + self.origin_factor = 1 + self.origin_decay = 1 + + def clear_consecutive_errors(self): + """Set the count of consecutive errors to zero. This is + done once we know a transaction has been successfully + completed.""" + + self.__consecutive_errors = 0 + + def record_connection(self, time): + """Record amount of time spent connecting.""" + + if not self.__used: + self.__used = True + + self.__connections += 1 + self.__connect_time += time + + def record_error(self, decayable=False, content=False, timeout=False): + """Record that an operation to the TransportRepoURI represented + by this RepoStats object failed with an error. 
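The two decay factors applied in get_repostats() fade out past problems at very different rates: the transient-error penalty keeps 99.9% of its value per iteration, while the origin bonus keeps only 95%. The following is purely illustrative arithmetic using those constants, showing how quickly each term dies off.

# Purely illustrative arithmetic using the decay constants from
# get_repostats(): 0.999 for _err_decay, 0.95 for origin_decay.
err_decay, origin_decay = 1.0, 1.0
for i in range(1, 101):
    err_decay *= 0.999
    origin_decay *= 0.95
    if i in (10, 50, 100):
        print(f"after {i:3d} iterations: "
              f"err_decay={err_decay:.3f} origin_decay={origin_decay:.5f}")
# after  10 iterations: err_decay=0.990 origin_decay=0.59874
# after  50 iterations: err_decay=0.951 origin_decay=0.07694
# after 100 iterations: err_decay=0.905 origin_decay=0.00592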
+ + Set decayable to true if the error is a transient + error that may be decayed by the stats framework. + + Set content to true if the error is caused by + corrupted or invalid content.""" + + if not self.__used: + self.__used = True + + self.__consecutive_errors += 1 + if decayable: + self.__decayable_err += 1 + self._err_decay += 1 + elif content: + self.__content_err += 1 + else: + self.__failed_tx += 1 + # A timeout may be decayable or not, so track it in addition + # to the other classes of errors. + if timeout: + self.__timeout_err += 1 + + def record_progress(self, bytes, seconds): + """Record time and size of a network operation to a + particular TransportRepoURI, represented by the RepoStats + object. + Place the number of bytes transferred in the bytes argument. + The time, in seconds, should be supplied in the + seconds argument.""" + + if not self.__used: + self.__used = True + self.__bytes_xfr += bytes + self.__seconds_xfr += seconds + + def record_tx(self): + """Record that an operation to the URI represented + by this RepoStats object was initiated.""" + + if not self.__used: + self.__used = True + self.__total_tx += 1 + + def reset(self): + """Reset transport stats in preparation for next operation.""" + + # The connection stats (such as number, cspeed, time) are not + # reset because the metadata bandwidth calculation would be + # skewed when picking a host that gives us fast data. In that + # case, keeping track of the latency helps quality make a + # better choice. + self.__bytes_xfr = 0.0 + self.__seconds_xfr = 0.0 + self.__failed_tx = 0 + self.__content_err = 0 + self.__decayable_err = 0 + self._err_decay = 0 + self.__total_tx = 0 + self.__consecutive_errors = 0 + self.origin_speed = 0.0 + + @property + def bytes_xfr(self): + """Return the number of bytes transferred.""" + + return self.__bytes_xfr + + @property + def connect_time(self): + """The average connection time for this host.""" + + if self.__connections == 0: + if self.__used and self.__timeout_err > 0: + return 1.0 + else: + return 0.0 + + # old-division; pylint: disable=W1619 + return self.__connect_time / self.__connections + + @property + def consecutive_errors(self): + """Return the number of successive errors this endpoint + has encountered.""" + + return self.__consecutive_errors + + @property + def failures(self): + """Return the number of failures that the client has encountered + while trying to perform operations on this repository.""" + + return self.__failed_tx + self.__content_err + self.__decayable_err + + @property + def content_errors(self): + """Return the number of content errors that the client has + encountered while trying to perform operation on this + repository.""" + + return self.__content_err + + @property + def num_connect(self): + """Return the number of times that the host has had a + connection established. This is less than or equal to the + number of transactions.""" + + return self.__connections + + @property + def priority(self): + """Return the priority of the URI, if one is assigned.""" + + if self.__priority is None: + return 0 + + return self.__priority + + @property + def scheme(self): + """Return the scheme of the RepoURI. (e.g. http, file.)""" + + return self.__scheme + + @property + def quality(self): + """Return the quality, as an integer value, of the + repository. A higher value means better quality. + + This particular implementation of quality() contains + a random term. 
Two successive calls to this function + may return different values.""" + + Nused = 20 + Cused = 10 + + Cspeed = 100 + Cconn_speed = 66 + Cerror = 500 + Ccontent_err = 1000 + Crand_max = 20 + Cospeed_none = 100000 + Cocspeed_none = 1 + + if self.origin_speed > 0: + ospeed = self.origin_speed + else: + ospeed = Cospeed_none + + if self.origin_cspeed > 0: + ocspeed = self.origin_cspeed + else: + ocspeed = Cocspeed_none + + # This function applies a bonus to hosts that have little or + # no usage. It started out life as a Heaviside step function, + # but it has since been adjusted so that it scales back the + # bonus as the host approaches the limit where the bonus + # is applied. Hosts with no use recieve the largest bonus, + # while hosts at <= Nused transactions receive the none. + def unused_bonus(self): + tx = 0 + + tx = self.__total_tx + + if tx < 0: + return 0 + + if tx < Nused: + return Cused * (Nused - tx) ** 2 + + return 0 + + def origin_order_bonus(self): + b = Cspeed * (self.origin_count) ** 2 + Nused**2 * Cused + return self.origin_factor * b * self.origin_decay + + # + # Quality function: + # + # This function presents the quality of a repository as an + # integer value. The quality is determined by observing + # different aspects of the repository's performance. This + # includes how often it has been used, the transfer speed, the + # connect speed, and the number of errors classified by type. + # + # The equation is currently defined as: + # + # Q = Origin_order_bonus() + Unused_bonus() + Cspeed * + # ((bytes/.001+seconds) / origin_speed)^2 + random_bonus( + # Crand_max) - Cconn_speed * (connect_speed / + # origin_connect_speed)^2 - Ccontent_error * (content_errors)^2 + # - Cerror * (non_decayable_errors + value_of_decayed_errors)^2 + # + # Unused_bonus = Cused * (MaxUsed - total tx)^2 if total_tx + # is less than MaxUsed, otherwise return 0. + # + # random_bonus is a gaussian distribution where random_max is + # set as the argument for the stddev. Most numbers generated + # will fall between 0 and -/+ random_max, but some will fall + # outside of the first standard deviation. + # + # The constants were derived by live testing, and using + # a simulated environment. 
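The comment block above spells out the quality model; the actual expression follows in quality(). A standalone sketch of the same formula makes the relative weight of each term easier to see. The constants mirror the ones defined in quality(); the sample inputs at the bottom are hypothetical values chosen only for illustration, not real repository statistics.

# Standalone sketch of the documented quality model.  Constants match
# quality(); the sample inputs below are hypothetical, not real stats.
import random

Nused, Cused = 20, 10
Cspeed, Cconn_speed = 100, 66
Cerror, Ccontent_err, Crand_max = 500, 1000, 20

def unused_bonus(total_tx):
    return Cused * (Nused - total_tx) ** 2 if 0 <= total_tx < Nused else 0

def quality(bytes_xfr, seconds_xfr, ospeed, connect_time, ocspeed,
            content_err, failed_tx, err_decay, total_tx, order_bonus=0):
    xfer_speed = bytes_xfr / (0.001 + seconds_xfr)
    return int(
        order_bonus
        + unused_bonus(total_tx)
        + Cspeed * (xfer_speed / ospeed) ** 2
        + int(random.gauss(0, Crand_max))
        - Cconn_speed * (connect_time / ocspeed) ** 2
        - Ccontent_err * content_err ** 2
        - Cerror * (failed_tx + err_decay) ** 2
    )

# A fast, clean mirror versus a slower one with one failed transaction.
print(quality(10**8, 10.0, 10**7, 0.2, 0.5, 0, 0, 0.0, 30))
print(quality(10**7, 10.0, 10**7, 0.2, 0.5, 0, 1, 0.0, 30))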
+ # + # old-division; pylint: disable=W1619 + q = ( + origin_order_bonus(self) + + unused_bonus(self) + + ( + Cspeed + * ((self.__bytes_xfr / (0.001 + self.__seconds_xfr)) / ospeed) + ** 2 + ) + + int(random.gauss(0, Crand_max)) + - (Cconn_speed * (self.connect_time / ocspeed) ** 2) + - (Ccontent_err * (self.__content_err) ** 2) + - (Cerror * (self.__failed_tx + self._err_decay) ** 2) + ) + return int(q) + + @property + def seconds_xfr(self): + """Return the total amount of time elapsed while performing + operations against this host.""" + + return self.__seconds_xfr + + @property + def success(self): + """Return the number of successful transaction that this client + has performed while communicating with this repository.""" + + return self.__total_tx - ( + self.__failed_tx + self.__content_err + self.__decayable_err + ) + + @property + def transfer_speed(self): + """Return the average transfer speed in bytes/sec for + operations against this uri.""" + + if self.__seconds_xfr == 0: + return 0.0 + + # old-division; pylint: disable=W1619 + return self.__bytes_xfr / self.__seconds_xfr + + @property + def url(self): + """Return the URL that identifies the repository that we're + keeping statistics about.""" + + return self.__url + + @property + def proxy(self): + """Return the default proxy being used to contact the repository + that we're keeping statistics about. Note that OS + environment variables, "http_proxy", "https_proxy", "all_proxy" + and "no_proxy" values will override this value in + pkg.client.transport.engine.""" + + return self.__proxy + + @property + def system(self): + """Return whether these statistics are being used to track the + system publisher, in which case, we always use the proxy + provided rather than proxy environment variables.""" + + return self.__system + + @property + def used(self): + """A boolean value that indicates whether the URI + has been used for network operations.""" + + return self.__used - return self.__used # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/client/transport/transport.py b/src/modules/client/transport/transport.py index 41b334123..0e6c70f21 100644 --- a/src/modules/client/transport/transport.py +++ b/src/modules/client/transport/transport.py @@ -25,7 +25,7 @@ # Copyright 2020 OmniOS Community Edition (OmniOSce) Association. # -from __future__ import print_function +from __future__ import print_function import copy import datetime as dt import errno @@ -39,8 +39,13 @@ from six.moves import http_client, range from cryptography import x509 from cryptography.hazmat.backends import default_backend -from six.moves.urllib.parse import quote, urlsplit, urlparse, urlunparse, \ - ParseResult +from six.moves.urllib.parse import ( + quote, + urlsplit, + urlparse, + urlunparse, + ParseResult, +) import pkg.catalog as catalog import pkg.client.api_errors as apx @@ -69,3791 +74,4007 @@ from pkg.client import global_settings from pkg.client.debugvalues import DebugValues from pkg.misc import PKG_RO_FILE_MODE -logger = global_settings.logger - -class TransportCfg(object): - """Contains configuration needed by the transport for proper - operations. Clients must create one of these objects, and then pass - it to a transport instance when it is initialized. This is the base - class. - """ - def __init__(self): - self.__caches = {} - - # Used to track if reset_caches() has been called at least - # once. 
- self.__caches_set = False +logger = global_settings.logger - self.pkg_pub_map = None - self.alt_pubs = None - # An integer that indicates the maximum times to check if a - # file needs to be uploaded for the transport. - self.max_transfer_checks = 20 - def add_cache(self, path, layout=None, pub=None, readonly=True): - """Adds the directory specified by 'path' as a location to read - file data from, and optionally to store to for the specified - publisher. 'path' must be a directory created for use with the - pkg.file_manager module. If the cache already exists for the - specified 'pub', its 'readonly' status will be updated. +class TransportCfg(object): + """Contains configuration needed by the transport for proper + operations. Clients must create one of these objects, and then pass + it to a transport instance when it is initialized. This is the base + class. + """ + + def __init__(self): + self.__caches = {} + + # Used to track if reset_caches() has been called at least + # once. + self.__caches_set = False + + self.pkg_pub_map = None + self.alt_pubs = None + # An integer that indicates the maximum times to check if a + # file needs to be uploaded for the transport. + self.max_transfer_checks = 20 + + def add_cache(self, path, layout=None, pub=None, readonly=True): + """Adds the directory specified by 'path' as a location to read + file data from, and optionally to store to for the specified + publisher. 'path' must be a directory created for use with the + pkg.file_manager module. If the cache already exists for the + specified 'pub', its 'readonly' status will be updated. + + 'layout' is an optional FileManager layout object that indicates + how file content in the cache is structured. If None, the + structure will automatically be determined at runtime for each + cache lookup request. + + 'pub' is an optional publisher prefix to restrict usage of this + cache to. If not provided, it is assumed that file data for any + publisher could be contained within this cache. + + 'readonly' is an optional boolean value indicating whether file + data should be stored here as well. Only one writeable cache + can exist for each 'pub' at a time.""" + + if not self.__caches_set: + self.reset_caches(shared=True) + + if not pub: + pub = "__all" + + pub_caches = self.__caches.setdefault(pub, []) + + write_caches = [cache for cache in pub_caches if not cache.readonly] + + # For now, there should be no write caches or a single one. + assert len(write_caches) <= 1 + + path = path.rstrip(os.path.sep) + for cache in pub_caches: + if cache.root != path: + continue + + if readonly: + # Nothing more to do. + cache.readonly = True + return - 'layout' is an optional FileManager layout object that indicates - how file content in the cache is structured. If None, the - structure will automatically be determined at runtime for each - cache lookup request. + # Ensure no other writeable caches exist for this + # publisher. + for wr_cache in write_caches: + if id(wr_cache) == id(cache): + continue + raise tx.TransportOperationError( + "Only one " + "cache that is writable for all or a " + "specific publisher may exist at a time." + ) + + cache.readonly = False + break + else: + # Either no caches exist for this publisher, or this is + # a new cache. 
+ pub_caches.append(fm.FileManager(path, readonly, layouts=layout)) + + def gen_publishers(self): + raise NotImplementedError + + def get_caches(self, pub=None, readonly=True): + """Returns the file_manager cache objects for the specified + publisher in order of preference. That is, caches should + be checked for file content in the order returned. + + 'pub' is an optional publisher prefix. If provided, caches + designated for use with the given publisher will be returned + first followed by any caches applicable to all publishers. + + 'readonly' is an optional boolean value indicating whether + a cache for storing file data should be returned. By default, + only caches for reading file data are returned.""" + + if not self.__caches_set: + self.reset_caches(shared=True) + + if isinstance(pub, publisher.Publisher): + pub = pub.prefix + elif not pub or not isinstance(pub, six.string_types): + pub = None + + caches = [ + cache + for cache in self.__caches.get(pub, []) + if readonly or not cache.readonly + ] + + if not readonly and caches: + # If a publisher-specific writeable cache has been + # found, return it alone. + return caches + + # If not filtering on publisher, this is a readonly case, or no + # writeable cache exists for the specified publisher, return any + # publisher-specific caches first and any additional ones after. + return caches + [ + cache + for cache in self.__caches.get("__all", []) + if readonly or not cache.readonly + ] + + def get_policy(self, policy_name): + raise NotImplementedError + + def get_property(self, property_name): + raise NotImplementedError + + def get_pkg_dir(self, pfmri): + """Returns the absolute path of the directory that should be + used to store and load manifest data. + """ + raise NotImplementedError - 'pub' is an optional publisher prefix to restrict usage of this - cache to. If not provided, it is assumed that file data for any - publisher could be contained within this cache. + def get_pkg_pathname(self, pfmri): + """Returns the absolute pathname of the file that manifest data + should be stored in and loaded from. + """ + raise NotImplementedError - 'readonly' is an optional boolean value indicating whether file - data should be stored here as well. Only one writeable cache - can exist for each 'pub' at a time.""" + def get_pkg_sigs(self, fmri, pub): + """Returns a dictionary of the signature data found in the + catalog for the given package FMRI and Publisher object or None + if no catalog is available.""" - if not self.__caches_set: - self.reset_caches(shared=True) + # Check provided publisher's catalog for signature data. + if pub.catalog: + return dict(pub.catalog.get_entry_signatures(fmri)) - if not pub: - pub = "__all" + def get_pkg_alt_repo(self, pfmri): + """Returns the repository object containing the origins that + should be used to retrieve the specified package or None. - pub_caches = self.__caches.setdefault(pub, []) + 'pfmri' is the FMRI object for the package.""" - write_caches = [ - cache - for cache in pub_caches - if not cache.readonly - ] + if not self.pkg_pub_map: + return - # For now, there should be no write caches or a single one. - assert len(write_caches) <= 1 - - path = path.rstrip(os.path.sep) - for cache in pub_caches: - if cache.root != path: - continue - - if readonly: - # Nothing more to do. - cache.readonly = True - return - - # Ensure no other writeable caches exist for this - # publisher. 
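get_caches() returns publisher-specific caches ahead of the shared "__all" caches, and for writes it stops at the first publisher-specific writable cache. A toy version of that lookup order follows; plain dicts stand in for FileManager objects, and the publisher name and paths are hypothetical.

# Toy model of the get_caches() lookup order.  Dicts stand in for
# pkg.file_manager.FileManager objects; publisher and paths are made up.
caches = {
    "example-pub": [{"root": "/var/pkg/pub-cache", "readonly": False}],
    "__all":       [{"root": "/var/pkg/shared-cache", "readonly": True}],
}

def get_caches(pub=None, readonly=True):
    specific = [c for c in caches.get(pub, [])
                if readonly or not c["readonly"]]
    if not readonly and specific:
        # A publisher-specific writable cache wins outright.
        return specific
    shared = [c for c in caches.get("__all", [])
              if readonly or not c["readonly"]]
    return specific + shared

print([c["root"] for c in get_caches("example-pub")])        # both, specific first
print([c["root"] for c in get_caches("example-pub", False)]) # writable one only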
- for wr_cache in write_caches: - if id(wr_cache) == id(cache): - continue - raise tx.TransportOperationError("Only one " - "cache that is writable for all or a " - "specific publisher may exist at a time.") - - cache.readonly = False - break - else: - # Either no caches exist for this publisher, or this is - # a new cache. - pub_caches.append(fm.FileManager(path, readonly, - layouts=layout)) - - def gen_publishers(self): - raise NotImplementedError - - def get_caches(self, pub=None, readonly=True): - """Returns the file_manager cache objects for the specified - publisher in order of preference. That is, caches should - be checked for file content in the order returned. - - 'pub' is an optional publisher prefix. If provided, caches - designated for use with the given publisher will be returned - first followed by any caches applicable to all publishers. - - 'readonly' is an optional boolean value indicating whether - a cache for storing file data should be returned. By default, - only caches for reading file data are returned.""" - - if not self.__caches_set: - self.reset_caches(shared=True) - - if isinstance(pub, publisher.Publisher): - pub = pub.prefix - elif not pub or not isinstance(pub, six.string_types): - pub = None - - caches = [ - cache - for cache in self.__caches.get(pub, []) - if readonly or not cache.readonly - ] + # Package data should be retrieved from an alternative location. + pfx, stem, ver = pfmri.tuple() + sver = str(ver) + pmap = self.pkg_pub_map + try: + return pmap[pfx][stem][sver].repository + except KeyError: + # No alternate known for source. + return - if not readonly and caches: - # If a publisher-specific writeable cache has been - # found, return it alone. - return caches - - # If not filtering on publisher, this is a readonly case, or no - # writeable cache exists for the specified publisher, return any - # publisher-specific caches first and any additional ones after. - return caches + [ - cache - for cache in self.__caches.get("__all", []) - if readonly or not cache.readonly - ] + def get_publisher(self, publisher_name): + raise NotImplementedError - def get_policy(self, policy_name): - raise NotImplementedError + def clear_caches(self, shared=False): + """Discard any cache information. - def get_property(self, property_name): - raise NotImplementedError + 'shared' is an optional boolean value indicating that any + shared cache information (caches not specific to any publisher) + should also be discarded. If True, callers are responsible for + ensuring a new set of shared cache information is added again. + """ - def get_pkg_dir(self, pfmri): - """Returns the absolute path of the directory that should be - used to store and load manifest data. - """ - raise NotImplementedError + # Caches fully set at least once. + self.__caches_set = True - def get_pkg_pathname(self, pfmri): - """Returns the absolute pathname of the file that manifest data - should be stored in and loaded from. - """ - raise NotImplementedError + for pub in list(self.__caches.keys()): + if shared or pub != "__all": + # Remove any publisher specific caches so that + # the most current publisher information can be + # used. + del self.__caches[pub] - def get_pkg_sigs(self, fmri, pub): - """Returns a dictionary of the signature data found in the - catalog for the given package FMRI and Publisher object or None - if no catalog is available.""" + def reset_caches(self, shared=False): + """Discard any cache information and reconfigure based on + current publisher configuration data. 
- # Check provided publisher's catalog for signature data. - if pub.catalog: - return dict(pub.catalog.get_entry_signatures( - fmri)) + 'shared' is an optional boolean value indicating that any + shared cache information (caches not specific to any publisher) + should also be discarded. If True, callers are responsible for + ensuring a new set of shared cache information is added again. + """ - def get_pkg_alt_repo(self, pfmri): - """Returns the repository object containing the origins that - should be used to retrieve the specified package or None. + # Clear the old cache setup. + self.clear_caches(shared=shared) - 'pfmri' is the FMRI object for the package.""" + # Automatically add any publisher repository origins + # or mirrors that are filesystem-based as readonly caches. + for pub in self.gen_publishers(): + repo = pub.repository + if not repo: + continue - if not self.pkg_pub_map: - return + for ruri in repo.origins + repo.mirrors: + if ruri.scheme != "file": + continue - # Package data should be retrieved from an alternative location. - pfx, stem, ver = pfmri.tuple() - sver = str(ver) - pmap = self.pkg_pub_map + path = ruri.get_pathname() try: - return pmap[pfx][stem][sver].repository - except KeyError: - # No alternate known for source. - return - - def get_publisher(self, publisher_name): - raise NotImplementedError - - def clear_caches(self, shared=False): - """Discard any cache information. - - 'shared' is an optional boolean value indicating that any - shared cache information (caches not specific to any publisher) - should also be discarded. If True, callers are responsible for - ensuring a new set of shared cache information is added again. - """ - - # Caches fully set at least once. - self.__caches_set = True - - for pub in list(self.__caches.keys()): - if shared or pub != "__all": - # Remove any publisher specific caches so that - # the most current publisher information can be - # used. - del self.__caches[pub] - - def reset_caches(self, shared=False): - """Discard any cache information and reconfigure based on - current publisher configuration data. - - 'shared' is an optional boolean value indicating that any - shared cache information (caches not specific to any publisher) - should also be discarded. If True, callers are responsible for - ensuring a new set of shared cache information is added again. - """ - - # Clear the old cache setup. - self.clear_caches(shared=shared) - - # Automatically add any publisher repository origins - # or mirrors that are filesystem-based as readonly caches. - for pub in self.gen_publishers(): - repo = pub.repository - if not repo: - continue - - for ruri in repo.origins + repo.mirrors: - if ruri.scheme != "file": - continue - - path = ruri.get_pathname() - try: - frepo = sr.Repository(root=path, - read_only=True) - for rstore in frepo.rstores: - if not rstore.file_root: - continue - if rstore.publisher and \ - rstore.publisher != pub.prefix: - # If the repository - # storage object is for - # a different publisher, - # skip it. - continue - - # Only add caches if they - # physically exist at this - # point. This avoids a storm of - # ENOENT errors that might occur - # during transfers for caches - # that will never exist on-disk. - # This is especially important - # for NFS-based repositories as - # they can significantly degrade - # transfer performance. This - # should be ok as transport will - # attempt to retrieve any - # resources that might have been - # cached here instead and fail - # gracefully if necessary. 
- if os.path.exists( - rstore.file_root): - self.add_cache( - rstore.file_root, - layout=rstore.file_layout, - pub=rstore.publisher, - readonly=True) - except (sr.RepositoryError, apx.ApiException): - # Cache isn't currently valid, so skip - # it for now. This essentially defers - # any errors that might be encountered - # accessing this repository until - # later when transport attempts to - # retrieve data through the engine. - continue - - incoming_root = property(doc="The absolute pathname of the " - "directory where in-progress downloads should be stored.") - - pkg_root = property(doc="The absolute pathname of the directory " - "where manifest files should be stored to and loaded from.") - - user_agent = property(doc="A string that identifies the user agent for " - "the transport.") + frepo = sr.Repository(root=path, read_only=True) + for rstore in frepo.rstores: + if not rstore.file_root: + continue + if rstore.publisher and rstore.publisher != pub.prefix: + # If the repository + # storage object is for + # a different publisher, + # skip it. + continue + + # Only add caches if they + # physically exist at this + # point. This avoids a storm of + # ENOENT errors that might occur + # during transfers for caches + # that will never exist on-disk. + # This is especially important + # for NFS-based repositories as + # they can significantly degrade + # transfer performance. This + # should be ok as transport will + # attempt to retrieve any + # resources that might have been + # cached here instead and fail + # gracefully if necessary. + if os.path.exists(rstore.file_root): + self.add_cache( + rstore.file_root, + layout=rstore.file_layout, + pub=rstore.publisher, + readonly=True, + ) + except (sr.RepositoryError, apx.ApiException): + # Cache isn't currently valid, so skip + # it for now. This essentially defers + # any errors that might be encountered + # accessing this repository until + # later when transport attempts to + # retrieve data through the engine. + continue + + incoming_root = property( + doc="The absolute pathname of the " + "directory where in-progress downloads should be stored." + ) + + pkg_root = property( + doc="The absolute pathname of the directory " + "where manifest files should be stored to and loaded from." + ) + + user_agent = property( + doc="A string that identifies the user agent for " "the transport." + ) class ImageTransportCfg(TransportCfg): - """A subclass of TransportCfg that gets its configuration information - from an Image object. - """ - - def __init__(self, image): - TransportCfg.__init__(self) - self.__img = image - - def gen_publishers(self): - return self.__img.gen_publishers() - - def get_policy(self, policy_name): - if not self.__img.cfg: - return False - return self.__img.cfg.get_policy(policy_name) - - def get_pkg_dir(self, pfmri): - """Returns the absolute path of the directory that should be - used to store and load manifest data. - """ - - return self.__img.get_manifest_dir(pfmri) - - def get_pkg_pathname(self, pfmri): - """Returns the absolute pathname of the file that the manifest - should be stored in and loaded from.""" - - return self.__img.get_manifest_path(pfmri) - - def get_pkg_sigs(self, fmri, pub): - """Returns a dictionary of the signature data found in the - catalog for the given package FMRI and Publisher object or None - if no catalog is available.""" - - # Check publisher for entry first. 
- try: - sigs = TransportCfg.get_pkg_sigs(self, fmri, pub) - except apx.UnknownCatalogEntry: - sigs = None + """A subclass of TransportCfg that gets its configuration information + from an Image object. + """ - if sigs is None: - # Either package was unknown or publisher catalog - # contained no signature data. Fallback to the known - # catalog as temporary sources may be in use. - kcat = self.__img.get_catalog( - self.__img.IMG_CATALOG_KNOWN) - return dict(kcat.get_entry_signatures(fmri)) - return sigs + def __init__(self, image): + TransportCfg.__init__(self) + self.__img = image - def get_pkg_alt_repo(self, pfmri): - """Returns the repository object containing the origins that - should be used to retrieve the specified package or None. + def gen_publishers(self): + return self.__img.gen_publishers() - 'pfmri' is the FMRI object for the package.""" + def get_policy(self, policy_name): + if not self.__img.cfg: + return False + return self.__img.cfg.get_policy(policy_name) - alt_repo = TransportCfg.get_pkg_alt_repo(self, pfmri) - if not alt_repo: - alt_repo = self.__img.get_pkg_repo(pfmri) - return alt_repo + def get_pkg_dir(self, pfmri): + """Returns the absolute path of the directory that should be + used to store and load manifest data. + """ - def get_property(self, property_name): - if not self.__img.cfg: - raise KeyError - return self.__img.get_property(property_name) + return self.__img.get_manifest_dir(pfmri) - def get_variant(self, variant_name): - if not self.__img.cfg: - raise KeyError - return self.__img.cfg.get_property('variant', - 'variant.' + variant_name) + def get_pkg_pathname(self, pfmri): + """Returns the absolute pathname of the file that the manifest + should be stored in and loaded from.""" - def get_publisher(self, publisher_name): - return self.__img.get_publisher(publisher_name) + return self.__img.get_manifest_path(pfmri) - def reset_caches(self, shared=True): - """Discard any publisher specific cache information and - reconfigure based on current publisher configuration data. + def get_pkg_sigs(self, fmri, pub): + """Returns a dictionary of the signature data found in the + catalog for the given package FMRI and Publisher object or None + if no catalog is available.""" - 'shared' is ignored and exists only for compatibility with - the interface defined by TransportCfg. - """ + # Check publisher for entry first. + try: + sigs = TransportCfg.get_pkg_sigs(self, fmri, pub) + except apx.UnknownCatalogEntry: + sigs = None + + if sigs is None: + # Either package was unknown or publisher catalog + # contained no signature data. Fallback to the known + # catalog as temporary sources may be in use. + kcat = self.__img.get_catalog(self.__img.IMG_CATALOG_KNOWN) + return dict(kcat.get_entry_signatures(fmri)) + return sigs + + def get_pkg_alt_repo(self, pfmri): + """Returns the repository object containing the origins that + should be used to retrieve the specified package or None. + + 'pfmri' is the FMRI object for the package.""" + + alt_repo = TransportCfg.get_pkg_alt_repo(self, pfmri) + if not alt_repo: + alt_repo = self.__img.get_pkg_repo(pfmri) + return alt_repo + + def get_property(self, property_name): + if not self.__img.cfg: + raise KeyError + return self.__img.get_property(property_name) + + def get_variant(self, variant_name): + if not self.__img.cfg: + raise KeyError + return self.__img.cfg.get_property("variant", "variant." 
+ variant_name) + + def get_publisher(self, publisher_name): + return self.__img.get_publisher(publisher_name) + + def reset_caches(self, shared=True): + """Discard any publisher specific cache information and + reconfigure based on current publisher configuration data. + + 'shared' is ignored and exists only for compatibility with + the interface defined by TransportCfg. + """ - # Call base class method to perform initial reset of all - # cache information. - TransportCfg.reset_caches(self, shared=True) + # Call base class method to perform initial reset of all + # cache information. + TransportCfg.reset_caches(self, shared=True) - # Then add image-specific cache data after. - for path, readonly, pub, layout in self.__img.get_cachedirs(): - self.add_cache(path, layout=layout, pub=pub, - readonly=readonly) + # Then add image-specific cache data after. + for path, readonly, pub, layout in self.__img.get_cachedirs(): + self.add_cache(path, layout=layout, pub=pub, readonly=readonly) - def __get_user_agent(self): - return misc.user_agent_str(self.__img, - global_settings.client_name) + def __get_user_agent(self): + return misc.user_agent_str(self.__img, global_settings.client_name) - incoming_root = property(lambda self: self.__img._incoming_cache_dir, - doc="The absolute pathname of the directory where in-progress " - "downloads should be stored.") + incoming_root = property( + lambda self: self.__img._incoming_cache_dir, + doc="The absolute pathname of the directory where in-progress " + "downloads should be stored.", + ) - user_agent = property(__get_user_agent, doc="A string that identifies " - "the user agent for the transport.") + user_agent = property( + __get_user_agent, + doc="A string that identifies " "the user agent for the transport.", + ) class GenericTransportCfg(TransportCfg): - """A subclass of TransportCfg for use by transport clients that - do not have an image.""" + """A subclass of TransportCfg for use by transport clients that + do not have an image.""" - def __init__(self, publishers=misc.EmptyI, incoming_root=None, - pkg_root=None, policy_map=misc.EmptyDict, - property_map=misc.EmptyDict): + def __init__( + self, + publishers=misc.EmptyI, + incoming_root=None, + pkg_root=None, + policy_map=misc.EmptyDict, + property_map=misc.EmptyDict, + ): + TransportCfg.__init__(self) + self.__publishers = {} + self.__incoming_root = incoming_root + self.__pkg_root = pkg_root + self.__policy_map = policy_map + self.__property_map = property_map - TransportCfg.__init__(self) - self.__publishers = {} - self.__incoming_root = incoming_root - self.__pkg_root = pkg_root - self.__policy_map = policy_map - self.__property_map = property_map + for p in publishers: + self.__publishers[p.prefix] = p - for p in publishers: - self.__publishers[p.prefix] = p + def add_publisher(self, pub): + self.__publishers[pub.prefix] = pub - def add_publisher(self, pub): - self.__publishers[pub.prefix] = pub + def gen_publishers(self): + return (p for p in self.__publishers.values()) - def gen_publishers(self): - return (p for p in self.__publishers.values()) + def get_pkg_dir(self, pfmri): + """Returns the absolute pathname of the directory that should be + used to store and load manifest data.""" - def get_pkg_dir(self, pfmri): - """Returns the absolute pathname of the directory that should be - used to store and load manifest data.""" + return os.path.join(self.pkg_root, pfmri.get_dir_path()) - return os.path.join(self.pkg_root, pfmri.get_dir_path()) + def get_pkg_pathname(self, pfmri): + """Returns the 
absolute pathname of the file that manifest data + should be stored in and loaded from.""" - def get_pkg_pathname(self, pfmri): - """Returns the absolute pathname of the file that manifest data - should be stored in and loaded from.""" + return os.path.join(self.get_pkg_dir(pfmri), "manifest") - return os.path.join(self.get_pkg_dir(pfmri), "manifest") + def get_policy(self, policy_name): + return self.__policy_map.get(policy_name, False) - def get_policy(self, policy_name): - return self.__policy_map.get(policy_name, False) + def get_property(self, property_name): + return self.__property_map[property_name] - def get_property(self, property_name): - return self.__property_map[property_name] + def get_publisher(self, publisher_name): + pub = self.__publishers.get(publisher_name) + if not pub: + raise apx.UnknownPublisher(publisher_name) + return pub - def get_publisher(self, publisher_name): - pub = self.__publishers.get(publisher_name) - if not pub: - raise apx.UnknownPublisher(publisher_name) - return pub + def remove_publisher(self, publisher_name): + return self.__publishers.pop(publisher_name, None) - def remove_publisher(self, publisher_name): - return self.__publishers.pop(publisher_name, None) + def __get_user_agent(self): + return misc.user_agent_str(None, global_settings.client_name) - def __get_user_agent(self): - return misc.user_agent_str(None, global_settings.client_name) + def __set_inc_root(self, inc_root): + self.__incoming_root = inc_root - def __set_inc_root(self, inc_root): - self.__incoming_root = inc_root + def __set_pkg_root(self, pkg_root): + self.__pkg_root = pkg_root - def __set_pkg_root(self, pkg_root): - self.__pkg_root = pkg_root + incoming_root = property( + lambda self: self.__incoming_root, + __set_inc_root, + doc="Absolute pathname to directory of in-progress downloads.", + ) - incoming_root = property( - lambda self: self.__incoming_root, __set_inc_root, - doc="Absolute pathname to directory of in-progress downloads.") + pkg_root = property( + lambda self: self.__pkg_root, + __set_pkg_root, + doc="The absolute pathname of the directory where in-progress " + "downloads should be stored.", + ) - pkg_root = property(lambda self: self.__pkg_root, __set_pkg_root, - doc="The absolute pathname of the directory where in-progress " - "downloads should be stored.") + user_agent = property( + __get_user_agent, + doc="A string that identifies the user agent for the transport.", + ) - user_agent = property(__get_user_agent, - doc="A string that identifies the user agent for the transport.") class LockedTransport(object): - """A decorator class that wraps transport functions, calling - their lock and unlock methods. Due to implementation differences - in the decorator protocol, the decorator must be used with - parenthesis in order for this to function correctly. Always - decorate functions @LockedTransport().""" - - def __init__(self, *d_args, **d_kwargs): - object.__init__(self) - - def __call__(self, f): - def wrapper(*fargs, **f_kwargs): - instance, fargs = fargs[0], fargs[1:] - lock = instance._lock - lock.acquire() - try: - return f(instance, *fargs, **f_kwargs) - finally: - lock.release() - return wrapper + """A decorator class that wraps transport functions, calling + their lock and unlock methods. Due to implementation differences + in the decorator protocol, the decorator must be used with + parenthesis in order for this to function correctly. 
Always + decorate functions @LockedTransport().""" -def _convert_repouris(repolist): - """Given a list of RepositoryURI objects, expand them into a list of - TransportRepoURI objects, each representing a different transport path - to the given RepositoryURI, allowing the transport to eg. try all - configured proxies for a given RepositoryURI.""" + def __init__(self, *d_args, **d_kwargs): + object.__init__(self) - trans_repouris = [] - for repouri in repolist: - trans_repouris.extend( - publisher.TransportRepoURI.fromrepouri(repouri)) - return trans_repouris + def __call__(self, f): + def wrapper(*fargs, **f_kwargs): + instance, fargs = fargs[0], fargs[1:] + lock = instance._lock + lock.acquire() + try: + return f(instance, *fargs, **f_kwargs) + finally: + lock.release() + return wrapper -class Transport(object): - """The generic transport wrapper object. Its public methods should - be used by all client code that wishes to perform file/network - packaging operations.""" - - def __init__(self, tcfg): - """Initialize the Transport object. Caller must supply - a TransportCfg object.""" - - self.__engine = None - self.__cadir = None - self.__portal_test_executed = False - self.__version_check_executed = False - self.__repo_cache = None - self.__dynamic_mirrors = [] - self._lock = nrlock.NRLock() - self.cfg = tcfg - self.stats = tstats.RepoChooser() - self.repo_status = {} - self.__tmp_crls = {} - # Used to record those actions that will have their payload - # transferred. - self.__hashes = defaultdict(set) - # Used to record those CRLs which are unreachable during the - # current operation. - self.__bad_crls = set() - - def __setup(self): - self.__engine = engine.CurlTransportEngine(self) - - # Configure engine's user agent - self.__engine.set_user_agent(self.cfg.user_agent) - - self.__repo_cache = trepo.RepoCache(self.__engine) - - if self.cfg.get_policy(imageconfig.MIRROR_DISCOVERY): - self.__dynamic_mirrors = mdetect.MirrorDetector() - try: - self.__dynamic_mirrors.locate() - except tx.mDNSException: - # Not fatal. Suppress. - pass +def _convert_repouris(repolist): + """Given a list of RepositoryURI objects, expand them into a list of + TransportRepoURI objects, each representing a different transport path + to the given RepositoryURI, allowing the transport to eg. try all + configured proxies for a given RepositoryURI.""" - def reset(self): - """Resets the transport. This needs to be done - if an install plan has been canceled and needs to - be restarted. This clears the state of the - transport and its associated components.""" + trans_repouris = [] + for repouri in repolist: + trans_repouris.extend(publisher.TransportRepoURI.fromrepouri(repouri)) + return trans_repouris - if not self.__engine: - # Don't reset if not configured - return - self._lock.acquire() - try: - self.__engine.reset() - self.__repo_cache.clear_cache() - self.cfg.reset_caches() - if self.__dynamic_mirrors: - try: - self.__dynamic_mirrors.locate() - except tx.mDNSException: - # Not fatal. Suppress. - pass - finally: - self._lock.release() - - def shutdown(self): - """Shuts down any portions of the transport that can - actively be connected to remote endpoints.""" - - if not self.__engine: - # Already shut down - return - - self._lock.acquire() +class Transport(object): + """The generic transport wrapper object. Its public methods should + be used by all client code that wishes to perform file/network + packaging operations.""" + + def __init__(self, tcfg): + """Initialize the Transport object. 
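LockedTransport must be written with parentheses because @LockedTransport() first constructs an argument-less decorator object and then calls it with the wrapped function, whereas a bare @LockedTransport would hand the function to __init__ instead. A cut-down demonstration of the same pattern follows, using a hypothetical Worker class and a plain threading.Lock in place of Transport and its NRLock.

# Cut-down demonstration of the @LockedTransport() pattern.  The Worker
# class and its _lock are hypothetical stand-ins for Transport.
import threading

class Locked(object):
    def __init__(self, *d_args, **d_kwargs):
        # Invoked by the "()" in @Locked(); takes no meaningful arguments.
        pass

    def __call__(self, f):
        def wrapper(instance, *args, **kwargs):
            with instance._lock:
                return f(instance, *args, **kwargs)
        return wrapper

class Worker(object):
    def __init__(self):
        self._lock = threading.Lock()
        self.count = 0

    @Locked()                 # note the parentheses
    def bump(self):
        self.count += 1

w = Worker()
w.bump()
print(w.count)                # 1, and the lock was held during bump()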
Caller must supply + a TransportCfg object.""" + + self.__engine = None + self.__cadir = None + self.__portal_test_executed = False + self.__version_check_executed = False + self.__repo_cache = None + self.__dynamic_mirrors = [] + self._lock = nrlock.NRLock() + self.cfg = tcfg + self.stats = tstats.RepoChooser() + self.repo_status = {} + self.__tmp_crls = {} + # Used to record those actions that will have their payload + # transferred. + self.__hashes = defaultdict(set) + # Used to record those CRLs which are unreachable during the + # current operation. + self.__bad_crls = set() + + def __setup(self): + self.__engine = engine.CurlTransportEngine(self) + + # Configure engine's user agent + self.__engine.set_user_agent(self.cfg.user_agent) + + self.__repo_cache = trepo.RepoCache(self.__engine) + + if self.cfg.get_policy(imageconfig.MIRROR_DISCOVERY): + self.__dynamic_mirrors = mdetect.MirrorDetector() + try: + self.__dynamic_mirrors.locate() + except tx.mDNSException: + # Not fatal. Suppress. + pass + + def reset(self): + """Resets the transport. This needs to be done + if an install plan has been canceled and needs to + be restarted. This clears the state of the + transport and its associated components.""" + + if not self.__engine: + # Don't reset if not configured + return + + self._lock.acquire() + try: + self.__engine.reset() + self.__repo_cache.clear_cache() + self.cfg.reset_caches() + if self.__dynamic_mirrors: try: - self.__engine.shutdown() - self.__engine = None - if self.__repo_cache: - self.__repo_cache.clear_cache() - self.__repo_cache = None - self.__dynamic_mirrors = [] - finally: - self._lock.release() - - @LockedTransport() - def do_search(self, pub, data, ccancel=None, alt_repo=None): - """Perform a search request. Returns a file-like object or an - iterable that contains the search results. Callers need to - catch transport exceptions that this object may generate.""" - - failures = tx.TransportFailures() - fobj = None - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = None - - if isinstance(pub, publisher.Publisher): - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - # Call setup if the transport isn't configured or was shutdown. - if not self.__engine: - self.__setup() - - # If version check hasn't been executed, run it prior to this - # operation. - self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) - - # For search, prefer remote sources if available. This allows - # consumers to configure both a file-based and network-based set - # of origins for a publisher without incurring the significant - # overhead of performing file-based search unless the network- - # based resource is unavailable. + self.__dynamic_mirrors.locate() + except tx.mDNSException: + # Not fatal. Suppress. + pass + finally: + self._lock.release() + + def shutdown(self): + """Shuts down any portions of the transport that can + actively be connected to remote endpoints.""" + + if not self.__engine: + # Already shut down + return + + self._lock.acquire() + try: + self.__engine.shutdown() + self.__engine = None + if self.__repo_cache: + self.__repo_cache.clear_cache() + self.__repo_cache = None + self.__dynamic_mirrors = [] + finally: + self._lock.release() + + @LockedTransport() + def do_search(self, pub, data, ccancel=None, alt_repo=None): + """Perform a search request. Returns a file-like object or an + iterable that contains the search results. 
Callers need to + catch transport exceptions that this object may generate.""" + + failures = tx.TransportFailures() + fobj = None + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = None + + if isinstance(pub, publisher.Publisher): + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() + + # If version check hasn't been executed, run it prior to this + # operation. + self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) + + # For search, prefer remote sources if available. This allows + # consumers to configure both a file-based and network-based set + # of origins for a publisher without incurring the significant + # overhead of performing file-based search unless the network- + # based resource is unavailable. + no_result_url = None + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + prefer_remote=True, + alt_repo=alt_repo, + operation="search", + versions=[0, 1], + ): + if retries == 1: no_result_url = None - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, prefer_remote=True, alt_repo=alt_repo, - operation="search", versions=[0, 1]): - - if retries == 1: - no_result_url = None - elif retries > 1 and no_result_url: - continue + elif retries > 1 and no_result_url: + continue + + try: + fobj = d.do_search(data, header, ccancel=ccancel, pub=pub) + if hasattr(fobj, "_prime"): + fobj._prime() + + if hasattr(fobj, "set_lock"): + # Since we're returning a file object + # that's using the same engine as the + # rest of this transport, assign our + # lock to the fobj. It must synchronize + # with us too. + fobj.set_lock(self._lock) + + return fobj + + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + + except tx.TransportProtoError as e: + if e.code in (http_client.NOT_FOUND, errno.ENOENT): + raise apx.UnsupportedSearchError(e.url, "search/1") + elif e.code == http_client.NO_CONTENT: + no_result_url = e.url + elif e.code in (http_client.BAD_REQUEST, errno.EINVAL): + raise apx.MalformedSearchRequest(e.url) + elif e.retryable: + failures.append(e) + else: + raise - try: - fobj = d.do_search(data, header, - ccancel=ccancel, pub=pub) - if hasattr(fobj, "_prime"): - fobj._prime() - - if hasattr(fobj, "set_lock"): - # Since we're returning a file object - # that's using the same engine as the - # rest of this transport, assign our - # lock to the fobj. It must synchronize - # with us too. 
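# [Editor's example] do_search and the other transport entry points follow the
# same retry discipline: iterate over candidate repository endpoints, collect
# retryable failures, re-raise permanent ones immediately, and only raise the
# accumulated failures once every endpoint has been tried. This standalone
# sketch uses invented names (RetryableError, AllEndpointsFailed,
# query_endpoints); it is not the pkg5 API, only the shape of the loop.
class RetryableError(Exception):
    retryable = True


class AllEndpointsFailed(Exception):
    def __init__(self, failures):
        super().__init__("{0} endpoint(s) failed".format(len(failures)))
        self.failures = failures


def query_endpoints(endpoints, request):
    failures = []
    for endpoint in endpoints:
        try:
            return endpoint(request)  # first success wins
        except Exception as e:
            if getattr(e, "retryable", False):
                failures.append(e)  # remember it and try the next endpoint
            else:
                raise  # permanent errors propagate at once
    raise AllEndpointsFailed(failures)  # nothing succeeded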
- fobj.set_lock(self._lock) - - return fobj - - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - - except tx.TransportProtoError as e: - if e.code in (http_client.NOT_FOUND, errno.ENOENT): - raise apx.UnsupportedSearchError(e.url, - "search/1") - elif e.code == http_client.NO_CONTENT: - no_result_url = e.url - elif e.code in (http_client.BAD_REQUEST, - errno.EINVAL): - raise apx.MalformedSearchRequest(e.url) - elif e.retryable: - failures.append(e) - else: - raise - - except tx.TransportException as e: - if e.retryable: - failures.append(e) - fobj = None - else: - raise - if no_result_url: - raise apx.NegativeSearchResult(no_result_url) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + fobj = None else: - raise failures + raise + if no_result_url: + raise apx.NegativeSearchResult(no_result_url) + else: + raise failures + + def get_ca_dir(self): + """Return the path to the directory that contains CA + certificates.""" + if self.__cadir is None: + # If transport isn't connected to image, or no + # ca-dir is specified, fallback to this one. + fb_cadir = os.path.join(os.path.sep, "etc", "ssl", "certs") + + try: + cadir = self.cfg.get_property("ca-path") + cadir = os.path.normpath(cadir) + except KeyError: + cadir = fb_cadir + + if not os.path.exists(cadir): + raise tx.TransportOperationError( + "Unable to " + "locate a CA directory: {0}\n" + "Secure connection is not " + "available.".format(cadir) + ) + + self.__cadir = cadir + return cadir + + return self.__cadir + + @LockedTransport() + def get_catalog(self, pub, ts=None, ccancel=None, path=None, alt_repo=None): + """Get the catalog for the specified publisher. If + ts is defined, request only changes newer than timestamp + ts.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + download_dir = self.cfg.incoming_root + if path: + croot = path + else: + croot = pub.catalog_root - def get_ca_dir(self): - """Return the path to the directory that contains CA - certificates.""" - if self.__cadir is None: - # If transport isn't connected to image, or no - # ca-dir is specified, fallback to this one. - fb_cadir = os.path.join(os.path.sep, "etc", - "ssl", "certs") + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() - try: - cadir = self.cfg.get_property("ca-path") - cadir = os.path.normpath(cadir) - except KeyError: - cadir = fb_cadir - - if not os.path.exists(cadir): - raise tx.TransportOperationError("Unable to " - "locate a CA directory: {0}\n" - "Secure connection is not " - "available.".format(cadir)) - - self.__cadir = cadir - return cadir - - return self.__cadir - - @LockedTransport() - def get_catalog(self, pub, ts=None, ccancel=None, path=None, - alt_repo=None): - """Get the catalog for the specified publisher. If - ts is defined, request only changes newer than timestamp - ts.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - download_dir = self.cfg.incoming_root - if path: - croot = path - else: - croot = pub.catalog_root + # If version check hasn't been executed, run it prior to this + # operation. 
+ self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) - # Call setup if the transport isn't configured or was shutdown. - if not self.__engine: - self.__setup() + for d, retries in self.__gen_repo( + pub, retry_count, origin_only=True, alt_repo=alt_repo + ): + repostats = self.stats[d.get_repouri_key()] - # If version check hasn't been executed, run it prior to this - # operation. - self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) + # If a transport exception occurs, + # save it if it's retryable, otherwise + # raise the error to a higher-level handler. + try: + resp = d.get_catalog(ts, header, ccancel=ccancel, pub=pub) - for d, retries in self.__gen_repo(pub, retry_count, - origin_only=True, alt_repo=alt_repo): + updatelog.recv(resp, croot, ts, pub) - repostats = self.stats[d.get_repouri_key()] + return - # If a transport exception occurs, - # save it if it's retryable, otherwise - # raise the error to a higher-level handler. - try: + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportProtoError as e: + if e.code == http_client.NOT_MODIFIED: + return + elif e.retryable: + failures.append(e) + else: + raise + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + except pkg.fmri.IllegalFmri as e: + repostats.record_error() + raise tx.TransportOperationError( + "Could not retrieve catalog from '{0}'\n" + " Unable to parse FMRI. Details " + "follow:\n{1}".format(pub.prefix, e) + ) + except EnvironmentError as e: + repostats.record_error() + raise tx.TransportOperationError( + "Could not retrieve catalog from '{0}'\n" + " Exception: str:{1!s} repr:{2!r}".format(pub.prefix, e, e) + ) + + raise failures + + @staticmethod + def __ignore_network_cache(): + """Check if transport should ignore network cache.""" + + inc_debug = False + inc_global = global_settings.client_no_network_cache + # Try to read from DebugValues. + if DebugValues.get("no_network_cache", False): + inc_debug = True + + return inc_debug or inc_global + + @staticmethod + def __get_request_header(header, repostats, retries, repo): + """Get request header based on repository status and client + specified network cache option.""" + + if ( + repostats.content_errors and retries > 1 + ) or Transport.__ignore_network_cache(): + return repo.build_refetch_header(header) + return header + + @staticmethod + def _verify_catalog(filename, dirname): + """A wrapper for catalog.verify() that catches + CatalogErrors and translates them to the appropriate + InvalidContentException that the transport uses for content + verification.""" + + filepath = os.path.join(dirname, filename) - resp = d.get_catalog(ts, header, - ccancel=ccancel, pub=pub) - - updatelog.recv(resp, croot, ts, pub) - - return - - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportProtoError as e: - if e.code == http_client.NOT_MODIFIED: - return - elif e.retryable: - failures.append(e) - else: - raise - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - except pkg.fmri.IllegalFmri as e: - repostats.record_error() - raise tx.TransportOperationError( - "Could not retrieve catalog from '{0}'\n" - " Unable to parse FMRI. 
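# [Editor's example] The __get_request_header/__ignore_network_cache pair added
# above decides when a request should bypass intermediate HTTP caches: on a
# retry after content errors, or when the client disabled the network cache
# globally. The sketch below is illustrative only; the concrete Cache-Control
# headers are an assumption, not necessarily what build_refetch_header emits.
def request_header(base, content_errors, retries, no_network_cache):
    if (content_errors and retries > 1) or no_network_cache:
        refetch = dict(base or {})
        refetch["Cache-Control"] = "no-cache"  # assumed refetch header
        refetch["Pragma"] = "no-cache"
        return refetch
    return base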
Details " - "follow:\n{1}".format(pub.prefix, e)) - except EnvironmentError as e: - repostats.record_error() - raise tx.TransportOperationError( - "Could not retrieve catalog from '{0}'\n" - " Exception: str:{1!s} repr:{2!r}".format( - pub.prefix, e, e)) - - raise failures - - @staticmethod - def __ignore_network_cache(): - """Check if transport should ignore network cache.""" - - inc_debug = False - inc_global = global_settings.client_no_network_cache - # Try to read from DebugValues. - if DebugValues.get("no_network_cache", False): - inc_debug = True - - return inc_debug or inc_global - - @staticmethod - def __get_request_header(header, repostats, retries, repo): - """Get request header based on repository status and client - specified network cache option.""" - - if (repostats.content_errors and retries > 1) or \ - Transport.__ignore_network_cache(): - return repo.build_refetch_header(header) - return header - - @staticmethod - def _verify_catalog(filename, dirname): - """A wrapper for catalog.verify() that catches - CatalogErrors and translates them to the appropriate - InvalidContentException that the transport uses for content - verification.""" - - filepath = os.path.join(dirname, filename) + try: + catalog.verify(filepath) + except apx.CatalogError as e: + portable.remove(filepath) + te = tx.InvalidContentException( + filepath, "CatalogPart failed validation: {0}".format(e) + ) + te.request = filename + raise te + return + + @LockedTransport() + def get_catalog1( + self, + pub, + flist, + ts=None, + path=None, + progtrack=None, + ccancel=None, + revalidate=False, + redownload=False, + alt_repo=None, + ): + """Get the catalog1 files from publisher 'pub' that + are given as a list in 'flist'. If the caller supplies + an optional timestamp argument, only get the files that + have been modified since the timestamp. At the moment, + this interface only supports supplying a timestamp + if the length of flist is 1. + + The timestamp, 'ts', should be provided as a floating + point value of seconds since the epoch in UTC. If callers + have a datetime object, they should use something like: + + time.mktime(dtobj.timetuple()) -> float + + If the caller has a UTC datetime object, the following + should be used instead: + + calendar.timegm(dtobj.utctimetuple()) -> float + + The examples above convert the object to the appropriate format + for get_catalog1. + + If the caller wants the completed download to be placed + in an alternate directory (pub.catalog_root is standard), + set a directory path in 'path'. + + If the caller knows that the upstream metadata is cached, + and needs a refresh it should set 'revalidate' to True. + If the caller knows that the upstream metadata is cached and + is corrupted, it should set 'redownload' to True. Either + 'revalidate' or 'redownload' may be used, but not both.""" + + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + failures = [] + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + if progtrack and ccancel: + progtrack.check_cancelation = ccancel + + # Ensure that caller only passed one item, if ts was + # used. + if ts and len(flist) > 1: + raise ValueError("Ts may only be used with a single" " item flist.") + + if redownload and revalidate: + raise ValueError( + "Either revalidate or redownload" " may be used, but not both." + ) + + # download_dir is temporary download path. Completed_dir + # is the cache where valid content lives. 
+ if path: + completed_dir = path + else: + completed_dir = pub.catalog_root + download_dir = self.cfg.incoming_root - try: - catalog.verify(filepath) - except apx.CatalogError as e: - portable.remove(filepath) - te = tx.InvalidContentException(filepath, - "CatalogPart failed validation: {0}".format(e)) - te.request = filename - raise te - return + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() - @LockedTransport() - def get_catalog1(self, pub, flist, ts=None, path=None, - progtrack=None, ccancel=None, revalidate=False, redownload=False, - alt_repo=None): - """Get the catalog1 files from publisher 'pub' that - are given as a list in 'flist'. If the caller supplies - an optional timestamp argument, only get the files that - have been modified since the timestamp. At the moment, - this interface only supports supplying a timestamp - if the length of flist is 1. + # If version check hasn't been executed, run it prior to this + # operation. + self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) - The timestamp, 'ts', should be provided as a floating - point value of seconds since the epoch in UTC. If callers - have a datetime object, they should use something like: + # Check if the download_dir exists. If it doesn't, create + # the directories. + self._makedirs(download_dir) + self._makedirs(completed_dir) - time.mktime(dtobj.timetuple()) -> float + # Call statvfs to find the blocksize of download_dir's + # filesystem. + try: + destvfs = os.statvfs(download_dir) + # Set the file buffer size to the blocksize of our + # filesystem. + self.__engine.set_file_bufsz(destvfs.f_bsize) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise apx.PermissionsException(e.filename) + else: + raise tx.TransportOperationError( + "Unable to stat VFS: {0}".format(e) + ) + except AttributeError as e: + # os.statvfs is not available on Windows + pass + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + operation="catalog", + versions=[1], + ccancel=ccancel, + alt_repo=alt_repo, + ): + failedreqs = [] + repostats = self.stats[d.get_repouri_key()] + gave_up = False + header = Transport.__get_request_header( + header, repostats, retries, d + ) + + # This returns a list of transient errors + # that occurred during the transport operation. + # An exception handler here isn't necessary + # unless we want to supress a permanent failure. + try: + errlist = d.get_catalog1( + flist, + download_dir, + header, + ts, + progtrack=progtrack, + pub=pub, + redownload=redownload, + revalidate=revalidate, + ) + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that the client just gave up, make a note + # of this condition and try another host. + gave_up = True + errlist = ex.failures + success = ex.success + + for e in errlist: + # General case: Fish the request information + # out of the exception, so the transport + # can retry the request at another host. + req = getattr(e, "request", None) + if req: + failedreqs.append(req) + failures.append(e) + else: + raise e + + if gave_up: + # If the transport gave up due to excessive + # consecutive errors, the caller is returned a + # list of successful requests, and a list of + # failures. We need to consider the requests + # that were not attempted because we gave up + # early. In this situation, they're failed + # requests, even though no exception was + # returned. Filter the flist to remove the + # successful requests. 
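# [Editor's example] The get_catalog1 docstring above describes two ways to
# derive the 'ts' value (seconds since the epoch, UTC) from a datetime object.
# Both conversions are shown here as written; the sample datetimes are invented.
import calendar
import datetime
import time

# Naive, local-time datetime -> epoch seconds (float):
local_dt = datetime.datetime(2024, 1, 15, 12, 30, 0)
ts_local = time.mktime(local_dt.timetuple())

# UTC datetime -> epoch seconds (int; wrap in float() if a float is required):
utc_dt = datetime.datetime(2024, 1, 15, 12, 30, 0, tzinfo=datetime.timezone.utc)
ts_utc = calendar.timegm(utc_dt.utctimetuple())

# Per the docstring, 'ts' may only be combined with a single-item flist.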
Everything else failed. + failedreqs = [x for x in flist if x not in success] + flist = failedreqs + elif failedreqs: + success = [x for x in flist if x not in failedreqs] + flist = failedreqs + else: + success = flist + flist = None + + for s in success: + dl_path = os.path.join(download_dir, s) - If the caller has a UTC datetime object, the following - should be used instead: + try: + self._verify_catalog(s, download_dir) + except tx.InvalidContentException as e: + repostats.record_error(content=True) + failedreqs.append(e.request) + failures.append(e) + if not flist: + flist = failedreqs + continue - calendar.timegm(dtobj.utctimetuple()) -> float + final_path = os.path.normpath(os.path.join(completed_dir, s)) - The examples above convert the object to the appropriate format - for get_catalog1. + finaldir = os.path.dirname(final_path) - If the caller wants the completed download to be placed - in an alternate directory (pub.catalog_root is standard), - set a directory path in 'path'. + self._makedirs(finaldir) + portable.rename(dl_path, final_path) - If the caller knows that the upstream metadata is cached, - and needs a refresh it should set 'revalidate' to True. - If the caller knows that the upstream metadata is cached and - is corrupted, it should set 'redownload' to True. Either - 'revalidate' or 'redownload' may be used, but not both.""" + # Return if everything was successful + if not flist and not errlist: + return - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - failures = [] - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) + if failedreqs and failures: + failures = [x for x in failures if x.request in failedreqs] + tfailurex = tx.TransportFailures() + for f in failures: + tfailurex.append(f) + raise tfailurex + + @LockedTransport() + def get_publisherdata(self, pub, ccancel=None): + """Given a publisher pub, return the publisher/0 + information as a list of publisher objects. If + no publisher information was contained in the + response, the list will be empty.""" + + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + failures = tx.TransportFailures() + header = None + + if isinstance(pub, publisher.Publisher): + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + operation="publisher", + versions=[0], + ccancel=ccancel, + ): + try: + resp = d.get_publisherinfo(header, ccancel=ccancel) + infostr = resp.read() + + # If parse succeeds, then the data is valid. 
+ pub_data = p5i.parse(data=infostr) + return [pub for pub, ignored in pub_data if pub] + except tx.ExcessiveTransientFailure as e: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(e.failures) + + except apx.InvalidP5IFile as e: + repouri_key = d.get_repouri_key() + exc = tx.TransferContentException( + repouri_key[0], + "api_errors.InvalidP5IFile:{0}".format( + " ".join([str(a) for a in e.args]) + ), + ) + repostats = self.stats[repouri_key] + repostats.record_error(content=True) + if exc.retryable: + failures.append(exc) + else: + raise exc - if progtrack and ccancel: - progtrack.check_cancelation = ccancel + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def get_syspub_data(self, repo_uri, ccancel=None): + """Get the publisher and image configuration from the system + repo given in repo_uri.""" + + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + failures = tx.TransportFailures() + header = None + + assert isinstance(self.cfg, ImageTransportCfg) + assert isinstance(repo_uri, publisher.RepositoryURI) + + for d, retries, v in self.__gen_repo( + repo_uri, + retry_count, + origin_only=True, + operation="syspub", + versions=[0], + ccancel=ccancel, + ): + try: + resp = d.get_syspub_info(header, ccancel=ccancel) + infostr = resp.read() + return p5s.parse(repo_uri.get_host(), infostr) + except tx.ExcessiveTransientFailure as e: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(e.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def get_datastream(self, pub, fhash, ccancel=None): + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + failures = tx.TransportFailures() + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, retry_count, operation="file", versions=[0, 1] + ): + repouri_key = d.get_repouri_key() + repostats = self.stats[repouri_key] + header = Transport.__get_request_header( + header, repostats, retries, d + ) + try: + return d.get_datastream( + fhash, v, header, ccancel=ccancel, pub=pub + ) + except tx.ExcessiveTransientFailure as e: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(e.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + raise failures + + @LockedTransport() + def get_content( + self, + pub, + fhash, + fmri=None, + ccancel=None, + hash_func=None, + errors="strict", + ): + """Given a fhash, return the uncompressed content content from + the remote object. This is similar to get_datastream, except + that the transport handles retrieving and decompressing the + content. + + 'fmri' If the fhash corresponds to a known package, the fmri + should be specified for optimal transport performance. + + 'hash_func' is the hash function that was used to compute fhash. + + 'errors' allows us to deal with UTF-8 encoding issues. This + really only makes sense to use when showing the content of + a license or release-note, all other cases should use 'strict' + """ - # Ensure that caller only passed one item, if ts was - # used. 
- if ts and len(flist) > 1: - raise ValueError("Ts may only be used with a single" - " item flist.") - - if redownload and revalidate: - raise ValueError("Either revalidate or redownload" - " may be used, but not both.") - - # download_dir is temporary download path. Completed_dir - # is the cache where valid content lives. - if path: - completed_dir = path + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + failures = tx.TransportFailures() + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + alt_repo = None + if not fmri and self.cfg.alt_pubs: + # No FMRI was provided, but alternate package sources + # are available, so create a new repository object + # that composites the repository information returned + # from the image with the alternate sources for this + # publisher. + alt_repo = pub.repository + if alt_repo: + alt_repo = copy.copy(alt_repo) + else: + alt_repo = publisher.Repository() + + for tpub in self.cfg.alt_pubs: + if tpub.prefix != pub.prefix: + continue + for o in tpub.repository.origins: + if not alt_repo.has_origin(o): + alt_repo.add_origin(o) + elif fmri: + alt_repo = self.cfg.get_pkg_alt_repo(fmri) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + operation="file", + versions=[0, 1], + alt_repo=alt_repo, + ): + repouri_key = d.get_repouri_key() + repostats = self.stats[repouri_key] + header = Transport.__get_request_header( + header, repostats, retries, d + ) + try: + resp = d.get_datastream( + fhash, v, header, ccancel=ccancel, pub=pub + ) + s = BytesIO() + hash_val = misc.gunzip_from_stream(resp, s, hash_func=hash_func) + + if hash_val != fhash: + exc = tx.InvalidContentException( + reason="hash failure: expected: {0}" + " computed: {1}".format(fhash, hash_val), + url=repouri_key[0], + proxy=repouri_key[1], + ) + repostats.record_error(content=True) + raise exc + + content = s.getvalue() + s.close() + + # we want str internally + return misc.force_str(content, errors=errors) + + except tx.ExcessiveTransientFailure as e: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(e.failures) + + except zlib.error as e: + exc = tx.TransferContentException( + repouri_key[0], + "zlib.error:{0}".format(" ".join([str(a) for a in e.args])), + proxy=repouri_key[1], + ) + repostats.record_error(content=True) + if exc.retryable: + failures.append(exc) else: - completed_dir = pub.catalog_root - download_dir = self.cfg.incoming_root + raise exc - # Call setup if the transport isn't configured or was shutdown. 
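# [Editor's example] get_content retrieves a gzip-compressed payload, hashes
# the *uncompressed* bytes and compares the result against fhash before
# decoding. misc.gunzip_from_stream is pkg-internal, so this standalone sketch
# substitutes gzip/hashlib from the standard library; the names are illustrative.
import gzip
import hashlib


def gunzip_and_verify(compressed, expected_hash, hash_func=hashlib.sha1):
    data = gzip.decompress(compressed)
    computed = hash_func(data).hexdigest()
    if computed != expected_hash:
        raise ValueError(
            "hash failure: expected: {0} computed: {1}".format(
                expected_hash, computed
            )
        )
    # 'errors' mirrors the keyword get_content exposes for license or
    # release-note text that may not be clean UTF-8.
    return data.decode("utf-8", errors="strict")


raw = b"example payload"
print(gunzip_and_verify(gzip.compress(raw), hashlib.sha1(raw).hexdigest()))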
- if not self.__engine: - self.__setup() + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + raise failures + + @LockedTransport() + def get_status(self, pub, ccancel=None): + """Given a publisher pub, return the stats information + for the repository as a dictionary.""" + + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + failures = tx.TransportFailures() + header = None + + if isinstance(pub, publisher.Publisher): + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + operation="status", + versions=[0], + ccancel=ccancel, + ): + try: + repouri_key = d.get_repouri_key() + repostats = self.stats[repouri_key] + header = Transport.__get_request_header( + header, repostats, retries, d + ) + resp = d.get_status(header, ccancel=ccancel) + infostr = resp.read() + + # If parse succeeds, then the data is valid. + return dict(json.loads(infostr)) + except tx.ExcessiveTransientFailure as e: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(e.failures) + + except (TypeError, ValueError) as e: + exc = tx.TransferContentException( + repouri_key[0], + "Invalid stats response: {0}".format(e), + proxy=repouri_key[1], + ) + repostats.record_error(content=True) + if exc.retryable: + failures.append(exc) + else: + raise exc - # If version check hasn't been executed, run it prior to this - # operation. - self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def touch_manifest(self, fmri, intent=None, ccancel=None, alt_repo=None): + """Touch a manifest. This operation does not + return the manifest's content. The FMRI is given + as fmri. An optional intent string may be supplied + as intent.""" + + failures = tx.TransportFailures(pfmri=fmri) + pub_prefix = fmri.publisher + pub = self.cfg.get_publisher(pub_prefix) + mfst = fmri.get_url_path() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + intent=intent, + uuid=self.__get_uuid(pub), + variant=self.__get_variant(pub), + ) + + if not alt_repo: + alt_repo = self.cfg.get_pkg_alt_repo(fmri) + + for d, retries in self.__gen_repo( + pub, retry_count, origin_only=True, alt_repo=alt_repo + ): + # If a transport exception occurs, + # save it if it's retryable, otherwise + # raise the error to a higher-level handler. + try: + d.touch_manifest(mfst, header, ccancel=ccancel, pub=pub) + return - # Check if the download_dir exists. If it doesn't, create - # the directories. - self._makedirs(download_dir) - self._makedirs(completed_dir) + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) - # Call statvfs to find the blocksize of download_dir's - # filesystem. - try: - destvfs = os.statvfs(download_dir) - # Set the file buffer size to the blocksize of our - # filesystem. 
- self.__engine.set_file_bufsz(destvfs.f_bsize) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise apx.PermissionsException(e.filename) - else: - raise tx.TransportOperationError( - "Unable to stat VFS: {0}".format(e)) - except AttributeError as e: - # os.statvfs is not available on Windows - pass - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, operation="catalog", versions=[1], - ccancel=ccancel, alt_repo=alt_repo): - - failedreqs = [] - repostats = self.stats[d.get_repouri_key()] - gave_up = False - header = Transport.__get_request_header(header, - repostats, retries, d) - - # This returns a list of transient errors - # that occurred during the transport operation. - # An exception handler here isn't necessary - # unless we want to supress a permanent failure. - try: - errlist = d.get_catalog1(flist, download_dir, - header, ts, progtrack=progtrack, pub=pub, - redownload=redownload, - revalidate=revalidate) - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that the client just gave up, make a note - # of this condition and try another host. - gave_up = True - errlist = ex.failures - success = ex.success - - for e in errlist: - # General case: Fish the request information - # out of the exception, so the transport - # can retry the request at another host. - req = getattr(e, "request", None) - if req: - failedreqs.append(req) - failures.append(e) - else: - raise e - - - if gave_up: - # If the transport gave up due to excessive - # consecutive errors, the caller is returned a - # list of successful requests, and a list of - # failures. We need to consider the requests - # that were not attempted because we gave up - # early. In this situation, they're failed - # requests, even though no exception was - # returned. Filter the flist to remove the - # successful requests. Everything else failed. - failedreqs = [ - x for x in flist - if x not in success - ] - flist = failedreqs - elif failedreqs: - success = [ - x for x in flist - if x not in failedreqs - ] - flist = failedreqs - else: - success = flist - flist = None - - for s in success: - dl_path = os.path.join(download_dir, s) - - try: - self._verify_catalog(s, download_dir) - except tx.InvalidContentException as e: - repostats.record_error(content=True) - failedreqs.append(e.request) - failures.append(e) - if not flist: - flist = failedreqs - continue - - final_path = os.path.normpath( - os.path.join(completed_dir, s)) - - finaldir = os.path.dirname(final_path) - - self._makedirs(finaldir) - portable.rename(dl_path, final_path) - - # Return if everything was successful - if not flist and not errlist: - return - - if failedreqs and failures: - failures = [ - x for x in failures - if x.request in failedreqs - ] - tfailurex = tx.TransportFailures() - for f in failures: - tfailurex.append(f) - raise tfailurex - - @LockedTransport() - def get_publisherdata(self, pub, ccancel=None): - """Given a publisher pub, return the publisher/0 - information as a list of publisher objects. 
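# [Editor's example] Both get_catalog1 and prefetch_manifests probe the
# download directory's filesystem block size with os.statvfs and fall back
# where it is unavailable (e.g. Windows). A condensed, simplified version of
# that probe; the real code distinguishes permission errors, and the
# 8192-byte default below is an invented fallback, not taken from pkg5.
import os


def file_bufsz(path, default=8192):
    try:
        return os.statvfs(path).f_bsize
    except (AttributeError, OSError):
        # AttributeError: no os.statvfs on this platform; OSError: stat failed.
        return default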
If - no publisher information was contained in the - response, the list will be empty.""" - - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - failures = tx.TransportFailures() - header = None - - if isinstance(pub, publisher.Publisher): - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, operation="publisher", versions=[0], - ccancel=ccancel): - try: - resp = d.get_publisherinfo(header, - ccancel=ccancel) - infostr = resp.read() - - # If parse succeeds, then the data is valid. - pub_data = p5i.parse(data=infostr) - return [pub for pub, ignored in pub_data if pub] - except tx.ExcessiveTransientFailure as e: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(e.failures) - - except apx.InvalidP5IFile as e: - repouri_key = d.get_repouri_key() - exc = tx.TransferContentException( - repouri_key[0], - "api_errors.InvalidP5IFile:{0}".format( - " ".join([str(a) for a in e.args]))) - repostats = self.stats[repouri_key] - repostats.record_error(content=True) - if exc.retryable: - failures.append(exc) - else: - raise exc - - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def get_syspub_data(self, repo_uri, ccancel=None): - """Get the publisher and image configuration from the system - repo given in repo_uri.""" - - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - failures = tx.TransportFailures() - header = None - - assert isinstance(self.cfg, ImageTransportCfg) - assert isinstance(repo_uri, publisher.RepositoryURI) - - for d, retries, v in self.__gen_repo(repo_uri, retry_count, - origin_only=True, operation="syspub", versions=[0], - ccancel=ccancel): - try: - resp = d.get_syspub_info(header, - ccancel=ccancel) - infostr = resp.read() - return p5s.parse(repo_uri.get_host(), infostr) - except tx.ExcessiveTransientFailure as e: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(e.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def get_datastream(self, pub, fhash, ccancel=None): - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - failures = tx.TransportFailures() - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - operation="file", versions=[0, 1]): - - repouri_key = d.get_repouri_key() - repostats = self.stats[repouri_key] - header = Transport.__get_request_header(header, - repostats, retries, d) - try: - return d.get_datastream(fhash, v, header, - ccancel=ccancel, pub=pub) - except tx.ExcessiveTransientFailure as e: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(e.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - raise failures - - @LockedTransport() - def get_content(self, pub, fhash, fmri=None, ccancel=None, - hash_func=None, errors="strict"): - """Given a fhash, return the uncompressed content content from - the remote object. This is similar to get_datastream, except - that the transport handles retrieving and decompressing the - content. 
- - 'fmri' If the fhash corresponds to a known package, the fmri - should be specified for optimal transport performance. - - 'hash_func' is the hash function that was used to compute fhash. - - 'errors' allows us to deal with UTF-8 encoding issues. This - really only makes sense to use when showing the content of - a license or release-note, all other cases should use 'strict' - """ - - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - failures = tx.TransportFailures() - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - alt_repo = None - if not fmri and self.cfg.alt_pubs: - # No FMRI was provided, but alternate package sources - # are available, so create a new repository object - # that composites the repository information returned - # from the image with the alternate sources for this - # publisher. - alt_repo = pub.repository - if alt_repo: - alt_repo = copy.copy(alt_repo) - else: - alt_repo = publisher.Repository() - - for tpub in self.cfg.alt_pubs: - if tpub.prefix != pub.prefix: - continue - for o in tpub.repository.origins: - if not alt_repo.has_origin(o): - alt_repo.add_origin(o) - elif fmri: - alt_repo = self.cfg.get_pkg_alt_repo(fmri) - - for d, retries, v in self.__gen_repo(pub, retry_count, - operation="file", versions=[0, 1], alt_repo=alt_repo): - - repouri_key = d.get_repouri_key() - repostats = self.stats[repouri_key] - header = Transport.__get_request_header(header, - repostats, retries, d) - try: - resp = d.get_datastream(fhash, v, header, - ccancel=ccancel, pub=pub) - s = BytesIO() - hash_val = misc.gunzip_from_stream(resp, s, - hash_func=hash_func) - - if hash_val != fhash: - exc = tx.InvalidContentException( - reason="hash failure: expected: {0}" - " computed: {1}".format(fhash, - hash_val), url=repouri_key[0], - proxy=repouri_key[1]) - repostats.record_error(content=True) - raise exc - - content = s.getvalue() - s.close() - - # we want str internally - return misc.force_str(content, errors=errors) - - except tx.ExcessiveTransientFailure as e: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(e.failures) - - except zlib.error as e: - exc = tx.TransferContentException( - repouri_key[0], - "zlib.error:{0}".format( - " ".join([str(a) for a in e.args])), - proxy=repouri_key[1]) - repostats.record_error(content=True) - if exc.retryable: - failures.append(exc) - else: - raise exc - - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - raise failures - - @LockedTransport() - def get_status(self, pub, ccancel=None): - """Given a publisher pub, return the stats information - for the repository as a dictionary.""" - - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - failures = tx.TransportFailures() - header = None - - if isinstance(pub, publisher.Publisher): - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, operation="status", versions=[0], - ccancel=ccancel): - try: - repouri_key = d.get_repouri_key() - repostats = self.stats[repouri_key] - header = Transport.__get_request_header(header, - repostats, retries, d) - resp = d.get_status(header, ccancel=ccancel) - infostr = resp.read() - - # If parse succeeds, then the data is valid. 
- return dict(json.loads(infostr)) - except tx.ExcessiveTransientFailure as e: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(e.failures) - - except (TypeError, ValueError) as e: - - exc = tx.TransferContentException( - repouri_key[0], - "Invalid stats response: {0}".format(e), - proxy=repouri_key[1]) - repostats.record_error(content=True) - if exc.retryable: - failures.append(exc) - else: - raise exc - - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def touch_manifest(self, fmri, intent=None, ccancel=None, - alt_repo=None): - """Touch a manifest. This operation does not - return the manifest's content. The FMRI is given - as fmri. An optional intent string may be supplied - as intent.""" - - failures = tx.TransportFailures(pfmri=fmri) - pub_prefix = fmri.publisher + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def get_compressed_attrs(self, fhash, pub=None, trans_id=None, hashes=True): + """Given a fhash, returns a tuple of (csize, chashes) where + 'csize' is the size of the file in the repository and 'chashes' + is a dictionary containing any hashes of the compressed data + known by the repository. If the repository cannot provide the + hash information or 'hashes' is False, chashes will be an empty + dictionary. If the repository does not have the file, a tuple + of (None, None) will be returned instead.""" + + failures = tx.TransportFailures() + # If the operation fails, it doesn't matter as it won't cause a + # correctness issue, and it could be the repository simply + # doesn't have the file, so don't try more than once. + retry_count = 1 + header = self.__build_header(uuid=self.__get_uuid(pub)) + + for d, retries in self.__gen_repo( + pub, retry_count, origin_only=True, single_repository=True + ): + return d.get_compressed_attrs( + fhash, header, pub=pub, trans_id=trans_id, hashes=hashes + ) + + @LockedTransport() + def get_manifest( + self, + fmri, + excludes=misc.EmptyI, + intent=None, + ccancel=None, + pub=None, + content_only=False, + alt_repo=None, + ): + """Given a fmri, and optional excludes, return a manifest + object.""" + + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + failures = tx.TransportFailures(pfmri=fmri) + pub_prefix = fmri.publisher + download_dir = self.cfg.incoming_root + mcontent = None + header = None + + if not pub: + try: pub = self.cfg.get_publisher(pub_prefix) - mfst = fmri.get_url_path() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(intent=intent, - uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) + except apx.UnknownPublisher: + # Publisher has likely been removed but we need + # data from it. + raise apx.NoPublisherRepositories(pub_prefix) + + if isinstance(pub, publisher.Publisher): + header = self.__build_header( + intent=intent, + uuid=self.__get_uuid(pub), + variant=self.__get_variant(pub), + ) + + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() + + # If version check hasn't been executed, run it prior to this + # operation. + self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) + + # Check if the download_dir exists. If it doesn't create + # the directories. 
+ self._makedirs(download_dir) + + if not alt_repo: + alt_repo = self.cfg.get_pkg_alt_repo(fmri) + + for d, retries in self.__gen_repo( + pub, retry_count, origin_only=True, alt_repo=alt_repo + ): + repouri_key = d.get_repouri_key() + repostats = self.stats[repouri_key] + verified = False + header = Transport.__get_request_header( + header, repostats, retries, d + ) + try: + resp = d.get_manifest(fmri, header, ccancel=ccancel, pub=pub) + # If resp is a StreamingFileObj obj, its read() + # methods will return bytes. We need str for + # manifest and here's the earliest point that + # we can convert it to str. + mcontent = misc.force_str(resp.read()) + + verified = self._verify_manifest( + fmri, content=mcontent, pub=pub + ) + + if content_only: + return mcontent + + m = manifest.FactoredManifest( + fmri, + self.cfg.get_pkg_dir(fmri), + contents=mcontent, + excludes=excludes, + pathname=self.cfg.get_pkg_pathname(fmri), + ) + + return m + + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + mcontent = None - if not alt_repo: - alt_repo = self.cfg.get_pkg_alt_repo(fmri) + except tx.InvalidContentException as e: + # We might be able to retrive uncorrupted + # content. If this was the last retry, then + # we're out of luck. + failures.append(e) + mcontent = None + repostats.record_error(content=True) - for d, retries in self.__gen_repo(pub, retry_count, - origin_only=True, alt_repo=alt_repo): + except tx.TransportException as e: + if e.retryable: + failures.append(e) + mcontent = None + else: + raise + + except (apx.InvalidPackageErrors, ActionError) as e: + if verified: + raise + repostats.record_error(content=True) + te = tx.TransferContentException( + repouri_key[0], reason=str(e), proxy=repouri_key[1] + ) + failures.append(te) + + raise failures + + @LockedTransport() + def prefetch_manifests( + self, + fetchlist, + excludes=misc.EmptyI, + progtrack=None, + ccancel=None, + alt_repo=None, + ): + """Given a list of tuples [(fmri, intent), ...], prefetch + the manifests specified by the fmris in argument + fetchlist. Caller may supply a progress tracker in + 'progtrack' as well as the check-cancellation callback in + 'ccancel.' + + This method will not return transient transport errors, + but it should raise any that would cause an immediate + failure.""" + + download_dir = self.cfg.incoming_root + + if not fetchlist: + return + + if not progtrack: + progtrack = progress.NullProgressTracker() + progtrack.manifest_fetch_start(len(fetchlist)) + + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() + + # If version check hasn't been executed, run it prior to this + # operation. + try: + self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) + except apx.InvalidDepotResponseException: + return - # If a transport exception occurs, - # save it if it's retryable, otherwise - # raise the error to a higher-level handler. 
- try: - d.touch_manifest(mfst, header, ccancel=ccancel, - pub=pub) - return - - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def get_compressed_attrs(self, fhash, pub=None, trans_id=None, - hashes=True): - """Given a fhash, returns a tuple of (csize, chashes) where - 'csize' is the size of the file in the repository and 'chashes' - is a dictionary containing any hashes of the compressed data - known by the repository. If the repository cannot provide the - hash information or 'hashes' is False, chashes will be an empty - dictionary. If the repository does not have the file, a tuple - of (None, None) will be returned instead.""" - - failures = tx.TransportFailures() - # If the operation fails, it doesn't matter as it won't cause a - # correctness issue, and it could be the repository simply - # doesn't have the file, so don't try more than once. - retry_count = 1 - header = self.__build_header(uuid=self.__get_uuid(pub)) - - for d, retries in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True): - return d.get_compressed_attrs(fhash, header, - pub=pub, trans_id=trans_id, hashes=hashes) - - @LockedTransport() - def get_manifest(self, fmri, excludes=misc.EmptyI, intent=None, - ccancel=None, pub=None, content_only=False, alt_repo=None): - """Given a fmri, and optional excludes, return a manifest - object.""" - - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - failures = tx.TransportFailures(pfmri=fmri) - pub_prefix = fmri.publisher - download_dir = self.cfg.incoming_root - mcontent = None - header = None + # Check if the download_dir exists. If it doesn't create + # the directories. + self._makedirs(download_dir) + + # Call statvfs to find the blocksize of download_dir's + # filesystem. + try: + destvfs = os.statvfs(download_dir) + # set the file buffer size to the blocksize of + # our filesystem + self.__engine.set_file_bufsz(destvfs.f_bsize) + except EnvironmentError as e: + if e.errno == errno.EACCES: + return + else: + raise tx.TransportOperationError( + "Unable to stat VFS: {0}".format(e) + ) + except AttributeError as e: + # os.statvfs is not available on Windows + pass + + # Walk the tuples in fetchlist and create a MultiXfr + # instance for each publisher's worth of requests that + # this routine must process. + mx_pub = {} + + get_alt = not alt_repo + for fmri, intent in fetchlist: + if get_alt: + alt_repo = self.cfg.get_pkg_alt_repo(fmri) + + # Multi transfer object must be created for each unique + # publisher or repository. + if alt_repo: + eid = id(alt_repo) + else: + eid = fmri.publisher + + try: + pub = self.cfg.get_publisher(fmri.publisher) + except apx.UnknownPublisher: + # Publisher has likely been removed but we need + # data from it. + raise apx.NoPublisherRepositories(fmri.publisher) + + header = self.__build_header( + intent=intent, + uuid=self.__get_uuid(pub), + variant=self.__get_variant(pub), + ) + + if eid not in mx_pub: + mx_pub[eid] = MultiXfr( + pub, progtrack=progtrack, ccancel=ccancel, alt_repo=alt_repo + ) + + # Add requests keyed by requested package + # fmri. Value contains (header, fmri) tuple. 
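# [Editor's example] A hypothetical caller consuming the contract documented
# for get_compressed_attrs above: (None, None) means the repository does not
# have the file, and an empty chashes dict means no hash information is
# available. plan_upload and its return strings are invented for illustration.
def plan_upload(transport, fhash, pub, trans_id):
    csize, chashes = transport.get_compressed_attrs(
        fhash, pub=pub, trans_id=trans_id
    )
    if csize is None:
        return "upload"  # repository does not have the file at all
    if not chashes:
        return "have-size-only"  # size known, compressed hashes unavailable
    return "have-hashes"  # size and compressed hashes both known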
+ mx_pub[eid].add_hash(fmri, (header, fmri)) + + for mxfr in mx_pub.values(): + namelist = [k for k in mxfr] + while namelist: + chunksz = self.__chunk_size( + pub, alt_repo=mxfr.get_alt_repo(), origin_only=True + ) + mfstlist = [(n, mxfr[n][0]) for n in namelist[:chunksz]] + del namelist[:chunksz] - if not pub: - try: - pub = self.cfg.get_publisher(pub_prefix) - except apx.UnknownPublisher: - # Publisher has likely been removed but we need - # data from it. - raise apx.NoPublisherRepositories(pub_prefix) - - if isinstance(pub, publisher.Publisher): - header = self.__build_header(intent=intent, - uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - # Call setup if the transport isn't configured or was shutdown. - if not self.__engine: - self.__setup() - - # If version check hasn't been executed, run it prior to this - # operation. - self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) - - # Check if the download_dir exists. If it doesn't create - # the directories. - self._makedirs(download_dir) - - if not alt_repo: - alt_repo = self.cfg.get_pkg_alt_repo(fmri) - - for d, retries in self.__gen_repo(pub, retry_count, - origin_only=True, alt_repo=alt_repo): - - repouri_key = d.get_repouri_key() - repostats = self.stats[repouri_key] - verified = False - header = Transport.__get_request_header(header, - repostats, retries, d) - try: - resp = d.get_manifest(fmri, header, - ccancel=ccancel, pub=pub) - # If resp is a StreamingFileObj obj, its read() - # methods will return bytes. We need str for - # manifest and here's the earliest point that - # we can convert it to str. - mcontent = misc.force_str(resp.read()) - - verified = self._verify_manifest(fmri, - content=mcontent, pub=pub) - - if content_only: - return mcontent - - m = manifest.FactoredManifest(fmri, - self.cfg.get_pkg_dir(fmri), - contents=mcontent, excludes=excludes, - pathname=self.cfg.get_pkg_pathname(fmri)) - - return m - - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - mcontent = None - - except tx.InvalidContentException as e: - # We might be able to retrive uncorrupted - # content. If this was the last retry, then - # we're out of luck. - failures.append(e) - mcontent = None - repostats.record_error(content=True) - - except tx.TransportException as e: - if e.retryable: - failures.append(e) - mcontent = None - else: - raise - - except (apx.InvalidPackageErrors, ActionError) as e: - if verified: - raise - repostats.record_error(content=True) - te = tx.TransferContentException( - repouri_key[0], reason=str(e), - proxy=repouri_key[1]) - failures.append(te) - - raise failures - - @LockedTransport() - def prefetch_manifests(self, fetchlist, excludes=misc.EmptyI, - progtrack=None, ccancel=None, alt_repo=None): - """Given a list of tuples [(fmri, intent), ...], prefetch - the manifests specified by the fmris in argument - fetchlist. Caller may supply a progress tracker in - 'progtrack' as well as the check-cancellation callback in - 'ccancel.' - - This method will not return transient transport errors, - but it should raise any that would cause an immediate - failure.""" - - download_dir = self.cfg.incoming_root - - if not fetchlist: - return - - if not progtrack: - progtrack = progress.NullProgressTracker() - progtrack.manifest_fetch_start(len(fetchlist)) - - # Call setup if the transport isn't configured or was shutdown. 
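# [Editor's example] The chunking loop above peels requests off the front of
# the per-publisher work list until it is empty. Reduced to its essentials,
# with CHUNK standing in for whatever self.__chunk_size() returns:
CHUNK = 3
namelist = ["pkg{0}".format(i) for i in range(8)]
while namelist:
    batch = namelist[:CHUNK]
    del namelist[:CHUNK]
    print(batch)
# -> ['pkg0', 'pkg1', 'pkg2'], then ['pkg3', 'pkg4', 'pkg5'], then ['pkg6', 'pkg7']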
- if not self.__engine: - self.__setup() - - # If version check hasn't been executed, run it prior to this - # operation. try: - self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) - except apx.InvalidDepotResponseException: - return + self._prefetch_manifests_list(mxfr, mfstlist, excludes) + except apx.PermissionsException: + progtrack.manifest_fetch_done() + return + + progtrack.manifest_fetch_done() + + def _prefetch_manifests_list(self, mxfr, mlist, excludes=misc.EmptyI): + """Perform bulk manifest prefetch. This is the routine + that downloads initiates the downloads in chunks + determined by its caller _prefetch_manifests. The mxfr + argument should be a MultiXfr object, and mlist + should be a list of tuples (fmri, header).""" + + # Don't perform multiple retries, since we're just prefetching. + retry_count = 1 + mfstlist = mlist + pub = mxfr.get_publisher() + progtrack = mxfr.get_progtrack() + assert progtrack + + # download_dir is temporary download path. + download_dir = self.cfg.incoming_root + + for d, retries in self.__gen_repo( + pub, retry_count, origin_only=True, alt_repo=mxfr.get_alt_repo() + ): + failedreqs = [] + repostats = self.stats[d.get_repouri_key()] + gave_up = False + + # Possibly overkill, if any content errors were seen + # we modify the headers of all requests, not just the + # ones that failed before. Also do this if we force + # cache validation. + if ( + repostats.content_errors and retries > 1 + ) or Transport.__ignore_network_cache(): + mfstlist = [ + (fmri, d.build_refetch_header(h)) for fmri, h in mfstlist + ] - # Check if the download_dir exists. If it doesn't create - # the directories. - self._makedirs(download_dir) + # This returns a list of transient errors + # that occurred during the transport operation. + # An exception handler here isn't necessary + # unless we want to suppress a permanent failure. + try: + errlist = d.get_manifests( + mfstlist, download_dir, progtrack=progtrack, pub=pub + ) + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, record this for later + # and try a different host. + gave_up = True + errlist = ex.failures + success = ex.success + + for e in errlist: + req = getattr(e, "request", None) + if req: + failedreqs.append(req) + else: + raise e + + if gave_up: + # If the transport gave up due to excessive + # consecutive errors, the caller is returned a + # list of successful requests, and a list of + # failures. We need to consider the requests + # that were not attempted because we gave up + # early. In this situation, they're failed + # requests, even though no exception was + # returned. Filter the flist to remove the + # successful requests. Everything else failed. + failedreqs = [x[0] for x in mfstlist if x[0] not in success] + elif failedreqs: + success = [x[0] for x in mfstlist if x[0] not in failedreqs] + else: + success = [x[0] for x in mfstlist] + + for s in success: + dl_path = os.path.join(download_dir, s.get_url_path()) - # Call statvfs to find the blocksize of download_dir's - # filesystem. 
try: - destvfs = os.statvfs(download_dir) - # set the file buffer size to the blocksize of - # our filesystem - self.__engine.set_file_bufsz(destvfs.f_bsize) - except EnvironmentError as e: - if e.errno == errno.EACCES: - return - else: - raise tx.TransportOperationError( - "Unable to stat VFS: {0}".format(e)) - except AttributeError as e: - # os.statvfs is not available on Windows - pass - - # Walk the tuples in fetchlist and create a MultiXfr - # instance for each publisher's worth of requests that - # this routine must process. - mx_pub = {} - - get_alt = not alt_repo - for fmri, intent in fetchlist: - if get_alt: - alt_repo = self.cfg.get_pkg_alt_repo(fmri) - - # Multi transfer object must be created for each unique - # publisher or repository. - if alt_repo: - eid = id(alt_repo) - else: - eid = fmri.publisher - - try: - pub = self.cfg.get_publisher(fmri.publisher) - except apx.UnknownPublisher: - # Publisher has likely been removed but we need - # data from it. - raise apx.NoPublisherRepositories( - fmri.publisher) - - header = self.__build_header(intent=intent, - uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - if eid not in mx_pub: - mx_pub[eid] = MultiXfr(pub, - progtrack=progtrack, - ccancel=ccancel, - alt_repo=alt_repo) - - # Add requests keyed by requested package - # fmri. Value contains (header, fmri) tuple. - mx_pub[eid].add_hash(fmri, (header, fmri)) - - for mxfr in mx_pub.values(): - namelist = [k for k in mxfr] - while namelist: - chunksz = self.__chunk_size(pub, - alt_repo=mxfr.get_alt_repo(), - origin_only=True) - mfstlist = [ - (n, mxfr[n][0]) - for n in namelist[:chunksz] - ] - del namelist[:chunksz] - - try: - self._prefetch_manifests_list(mxfr, - mfstlist, excludes) - except apx.PermissionsException: - progtrack.manifest_fetch_done() - return - - progtrack.manifest_fetch_done() - - def _prefetch_manifests_list(self, mxfr, mlist, excludes=misc.EmptyI): - """Perform bulk manifest prefetch. This is the routine - that downloads initiates the downloads in chunks - determined by its caller _prefetch_manifests. The mxfr - argument should be a MultiXfr object, and mlist - should be a list of tuples (fmri, header).""" - - # Don't perform multiple retries, since we're just prefetching. - retry_count = 1 - mfstlist = mlist - pub = mxfr.get_publisher() - progtrack = mxfr.get_progtrack() - assert(progtrack) - - # download_dir is temporary download path. - download_dir = self.cfg.incoming_root - - for d, retries in self.__gen_repo(pub, retry_count, - origin_only=True, alt_repo=mxfr.get_alt_repo()): - - failedreqs = [] - repostats = self.stats[d.get_repouri_key()] - gave_up = False - - # Possibly overkill, if any content errors were seen - # we modify the headers of all requests, not just the - # ones that failed before. Also do this if we force - # cache validation. - if (repostats.content_errors and retries > 1) or \ - Transport.__ignore_network_cache(): - mfstlist = [(fmri, d.build_refetch_header(h)) - for fmri, h in mfstlist] - - # This returns a list of transient errors - # that occurred during the transport operation. - # An exception handler here isn't necessary - # unless we want to suppress a permanent failure. - try: - errlist = d.get_manifests(mfstlist, - download_dir, progtrack=progtrack, pub=pub) - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, record this for later - # and try a different host. 
- gave_up = True - errlist = ex.failures - success = ex.success - - for e in errlist: - req = getattr(e, "request", None) - if req: - failedreqs.append(req) - else: - raise e - - if gave_up: - # If the transport gave up due to excessive - # consecutive errors, the caller is returned a - # list of successful requests, and a list of - # failures. We need to consider the requests - # that were not attempted because we gave up - # early. In this situation, they're failed - # requests, even though no exception was - # returned. Filter the flist to remove the - # successful requests. Everything else failed. - failedreqs = [ - x[0] for x in mfstlist - if x[0] not in success - ] - elif failedreqs: - success = [ - x[0] for x in mfstlist - if x[0] not in failedreqs - ] - else: - success = [ x[0] for x in mfstlist ] - - for s in success: - - dl_path = os.path.join(download_dir, - s.get_url_path()) - - try: - # Verify manifest content. - fmri = mxfr[s][1] - verified = self._verify_manifest(fmri, - dl_path) - except tx.InvalidContentException as e: - e.request = s - repostats.record_error(content=True) - failedreqs.append(s) - continue - - try: - mf = open(dl_path) - mcontent = mf.read() - mf.close() - manifest.FactoredManifest(fmri, - self.cfg.get_pkg_dir(fmri), - contents=mcontent, excludes=excludes, - pathname=self.cfg.get_pkg_pathname(fmri)) - except (apx.InvalidPackageErrors, - ActionError) as e: - if verified: - # If the manifest was physically - # valid, but can't be logically - # parsed, drive on. - portable.remove(dl_path) - progtrack.manifest_commit() - mxfr.del_hash(s) - continue - repostats.record_error(content=True) - failedreqs.append(s) - portable.remove(dl_path) - continue - - portable.remove(dl_path) - progtrack.manifest_commit() - mxfr.del_hash(s) - - # If there were failures, re-generate list for just - # failed requests. - if failedreqs: - # Generate mfstlist here, which included any - # reqs that failed during verification. - mfstlist = [ - (x,y) for x,y in mfstlist - if x in failedreqs - ] - # Return if everything was successful - else: - return - - def _verify_manifest(self, fmri, mfstpath=None, content=None, pub=None): - """Verify a manifest. The caller must supply the FMRI - for the package in 'fmri', as well as the path to the - manifest file that will be verified. If signature information - is not present, this routine returns False. If signature - information is present, and the manifest verifies, this - method returns true. If the manifest fails to verify, - this function throws an InvalidContentException. - - The caller may either specify a pathname to a file that - contains the manifest in 'mfstpath' or a string that contains - the manifest content in 'content'. One of these arguments - must be used.""" - - # Bail if manifest validation has been turned off for - # debugging/testing purposes. - if DebugValues.get("manifest_validate") == "Never": - return True - - must_verify = \ - DebugValues.get("manifest_validate") == "Always" - - if not isinstance(pub, publisher.Publisher): - # Get publisher using information from FMRI. - try: - pub = self.cfg.get_publisher(fmri.publisher) - except apx.UnknownPublisher: - if must_verify: - assert False, \ - "Did not validate manifest; " \ - "unknown publisher {0} ({1}).".format( - fmri.publisher, fmri) - return False + # Verify manifest content. 
+ fmri = mxfr[s][1] + verified = self._verify_manifest(fmri, dl_path) + except tx.InvalidContentException as e: + e.request = s + repostats.record_error(content=True) + failedreqs.append(s) + continue try: - sigs = self.cfg.get_pkg_sigs(fmri, pub) - except apx.UnknownCatalogEntry: - if must_verify: - assert False, "Did not validate manifest; " \ - "couldn't find sigs." - return False - - if sigs and "sha-1" in sigs: - chash = sigs["sha-1"] - else: - if must_verify: - assert False, \ - "Did not validate manifest; no sha-1 sig." - return False - - if mfstpath: - mf = open(mfstpath) - mcontent = mf.read() - mf.close() - elif content is not None: - mcontent = content - else: - raise ValueError("Caller must supply either mfstpath " - "or content arguments.") + mf = open(dl_path) + mcontent = mf.read() + mf.close() + manifest.FactoredManifest( + fmri, + self.cfg.get_pkg_dir(fmri), + contents=mcontent, + excludes=excludes, + pathname=self.cfg.get_pkg_pathname(fmri), + ) + except (apx.InvalidPackageErrors, ActionError) as e: + if verified: + # If the manifest was physically + # valid, but can't be logically + # parsed, drive on. + portable.remove(dl_path) + progtrack.manifest_commit() + mxfr.del_hash(s) + continue + repostats.record_error(content=True) + failedreqs.append(s) + portable.remove(dl_path) + continue + + portable.remove(dl_path) + progtrack.manifest_commit() + mxfr.del_hash(s) + + # If there were failures, re-generate list for just + # failed requests. + if failedreqs: + # Generate mfstlist here, which included any + # reqs that failed during verification. + mfstlist = [(x, y) for x, y in mfstlist if x in failedreqs] + # Return if everything was successful + else: + return - newhash = manifest.Manifest.hash_create(mcontent) + def _verify_manifest(self, fmri, mfstpath=None, content=None, pub=None): + """Verify a manifest. The caller must supply the FMRI + for the package in 'fmri', as well as the path to the + manifest file that will be verified. If signature information + is not present, this routine returns False. If signature + information is present, and the manifest verifies, this + method returns true. If the manifest fails to verify, + this function throws an InvalidContentException. + + The caller may either specify a pathname to a file that + contains the manifest in 'mfstpath' or a string that contains + the manifest content in 'content'. One of these arguments + must be used.""" + + # Bail if manifest validation has been turned off for + # debugging/testing purposes. + if DebugValues.get("manifest_validate") == "Never": + return True + + must_verify = DebugValues.get("manifest_validate") == "Always" + + if not isinstance(pub, publisher.Publisher): + # Get publisher using information from FMRI. + try: + pub = self.cfg.get_publisher(fmri.publisher) + except apx.UnknownPublisher: + if must_verify: + assert False, ( + "Did not validate manifest; " + "unknown publisher {0} ({1}).".format( + fmri.publisher, fmri + ) + ) + return False - if chash != newhash: - if mfstpath: - sz = os.stat(mfstpath).st_size - portable.remove(mfstpath) - else: - sz = None - raise tx.InvalidContentException(mfstpath, - "manifest hash failure: fmri: {0} \n" - "expected: {1} computed: {2}".format( - fmri, chash, newhash), size=sz) - return True + try: + sigs = self.cfg.get_pkg_sigs(fmri, pub) + except apx.UnknownCatalogEntry: + if must_verify: + assert False, ( + "Did not validate manifest; " "couldn't find sigs." 
+ ) + return False + + if sigs and "sha-1" in sigs: + chash = sigs["sha-1"] + else: + if must_verify: + assert False, "Did not validate manifest; no sha-1 sig." + return False + + if mfstpath: + mf = open(mfstpath) + mcontent = mf.read() + mf.close() + elif content is not None: + mcontent = content + else: + raise ValueError( + "Caller must supply either mfstpath " "or content arguments." + ) - @staticmethod - def __build_header(intent=None, uuid=None, variant=None): - """Return a dictionary that contains various - header fields, depending upon what arguments - were passed to the function. Supply intent header in intent - argument, uuid information in uuid argument.""" + newhash = manifest.Manifest.hash_create(mcontent) - header = {} + if chash != newhash: + if mfstpath: + sz = os.stat(mfstpath).st_size + portable.remove(mfstpath) + else: + sz = None + raise tx.InvalidContentException( + mfstpath, + "manifest hash failure: fmri: {0} \n" + "expected: {1} computed: {2}".format(fmri, chash, newhash), + size=sz, + ) + return True - if intent: - header["X-IPkg-Intent"] = intent + @staticmethod + def __build_header(intent=None, uuid=None, variant=None): + """Return a dictionary that contains various + header fields, depending upon what arguments + were passed to the function. Supply intent header in intent + argument, uuid information in uuid argument.""" - if uuid: - header["X-IPkg-UUID"] = uuid + header = {} - if variant: - header["X-IPkg-Variant"] = variant + if intent: + header["X-IPkg-Intent"] = intent - if not header: - return None + if uuid: + header["X-IPkg-UUID"] = uuid - return header + if variant: + header["X-IPkg-Variant"] = variant - def __get_uuid(self, pub): - if not self.cfg.get_policy(imageconfig.SEND_UUID): - return None + if not header: + return None - try: - return pub.client_uuid - except KeyError: - return None + return header - def __get_variant(self, pub): - if not self.cfg.get_policy(imageconfig.SEND_UUID): - return None + def __get_uuid(self, pub): + if not self.cfg.get_policy(imageconfig.SEND_UUID): + return None + + try: + return pub.client_uuid + except KeyError: + return None + + def __get_variant(self, pub): + if not self.cfg.get_policy(imageconfig.SEND_UUID): + return None + + try: + return ( + self.cfg.get_variant("opensolaris.zone") + + "," + + self.cfg.get_variant("opensolaris.imagetype") + ) + except KeyError: + return None + + @staticmethod + def _makedirs(newdir): + """A helper function for _get_files that makes directories, + if needed.""" + + if not os.path.exists(newdir): + try: + os.makedirs(newdir) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise apx.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise apx.ReadOnlyFileSystemException(e.filename) + raise tx.TransportOperationError( + "Unable to " "make directory: {0}".format(e) + ) + + def _get_files_list(self, mfile, flist): + """Download the files given in argument 'flist'. This + allows us to break up download operations into multiple + chunks. Since we re-evaluate our host selection after + each chunk, this gives us a better way of reacting to + changing conditions in the network.""" + + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + failures = [] + filelist = flist + pub = mfile.get_publisher() + progtrack = mfile.get_progtrack() + header = None + + if isinstance(pub, publisher.Publisher): + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + # download_dir is temporary download path. 
+ download_dir = self.cfg.incoming_root + + cache = self.cfg.get_caches(pub, readonly=False) + if cache: + # For now, pick first cache in list, if any are + # present. + cache = cache[0] + else: + cache = None + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + operation="file", + versions=[0, 1], + alt_repo=mfile.get_alt_repo(), + ): + failedreqs = [] + repostats = self.stats[d.get_repouri_key()] + header = Transport.__get_request_header( + header, repostats, retries, d + ) + + gave_up = False + + # This returns a list of transient errors + # that occurred during the transport operation. + # An exception handler here isn't necessary + # unless we want to supress a permanant failure. + try: + errlist = d.get_files( + filelist, download_dir, progtrack, v, header, pub=pub + ) + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, record this for later + # and try a different host. + gave_up = True + errlist = ex.failures + success = ex.success + + for e in errlist: + req = getattr(e, "request", None) + if req: + failedreqs.append(req) + failures.append(e) + else: + raise e + + if gave_up: + # If the transport gave up due to excessive + # consecutive errors, the caller is returned a + # list of successful requests, and a list of + # failures. We need to consider the requests + # that were not attempted because we gave up + # early. In this situation, they're failed + # requests, even though no exception was + # returned. Filter the flist to remove the + # successful requests. Everything else failed. + failedreqs = [x for x in filelist if x not in success] + filelist = failedreqs + elif failedreqs: + success = [x for x in filelist if x not in failedreqs] + filelist = failedreqs + else: + success = filelist + filelist = None + + for s in success: + dl_path = os.path.join(download_dir, s) try: - return ( - self.cfg.get_variant('opensolaris.zone') + ',' + - self.cfg.get_variant('opensolaris.imagetype')) - except KeyError: - return None - - @staticmethod - def _makedirs(newdir): - """A helper function for _get_files that makes directories, - if needed.""" - - if not os.path.exists(newdir): - try: - os.makedirs(newdir) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise apx.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise apx.ReadOnlyFileSystemException( - e.filename) - raise tx.TransportOperationError("Unable to " - "make directory: {0}".format(e)) - - def _get_files_list(self, mfile, flist): - """Download the files given in argument 'flist'. This - allows us to break up download operations into multiple - chunks. Since we re-evaluate our host selection after - each chunk, this gives us a better way of reacting to - changing conditions in the network.""" - - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - failures = [] - filelist = flist - pub = mfile.get_publisher() - progtrack = mfile.get_progtrack() - header = None - - if isinstance(pub, publisher.Publisher): - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - # download_dir is temporary download path. 
- download_dir = self.cfg.incoming_root - - cache = self.cfg.get_caches(pub, readonly=False) + self._verify_content(mfile[s][0], dl_path) + except tx.InvalidContentException as e: + mfile.subtract_progress(e.size) + e.request = s + repostats.record_error(content=True) + failedreqs.append(s) + failures.append(e) + if not filelist: + filelist = failedreqs + continue + if cache: - # For now, pick first cache in list, if any are - # present. - cache = cache[0] + cpath = cache.insert(s, dl_path) + mfile.file_done(s, cpath) else: - cache = None + mfile.file_done(s, dl_path) - for d, retries, v in self.__gen_repo(pub, retry_count, - operation="file", versions=[0, 1], - alt_repo=mfile.get_alt_repo()): - - failedreqs = [] - repostats = self.stats[d.get_repouri_key()] - header = Transport.__get_request_header(header, - repostats, retries, d) + # Return if everything was successful + if not filelist and not errlist: + return - gave_up = False + if failedreqs and failures: + failures = [x for x in failures if x.request in failedreqs] + tfailurex = tx.TransportFailures(pfmri=mfile.pfmri) + for f in failures: + tfailurex.append(f) + raise tfailurex + + @LockedTransport() + def _get_files(self, mfile): + """Perform an operation that gets multiple files at once. + A mfile object contains information about the multiple-file + request that will be performed.""" + + download_dir = self.cfg.incoming_root + pub = mfile.get_publisher() + + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() + + # If version check hasn't been executed, run it prior to this + # operation. + self._version_check_all( + ccancel=mfile.get_ccancel(), alt_repo=mfile.get_alt_repo() + ) + + # Check if the download_dir exists. If it doesn't create + # the directories. + self._makedirs(download_dir) + + # Call statvfs to find the blocksize of download_dir's + # filesystem. + try: + destvfs = os.statvfs(download_dir) + # set the file buffer size to the blocksize of + # our filesystem + self.__engine.set_file_bufsz(destvfs.f_bsize) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise apx.PermissionsException(e.filename) + else: + raise tx.TransportOperationError( + "Unable to stat VFS: {0}".format(e) + ) + except AttributeError as e: + # os.statvfs is not available on Windows + pass + + while mfile: + filelist = [] + chunksz = self.__chunk_size(pub, alt_repo=mfile.get_alt_repo()) + + for i, v in enumerate(mfile): + if i >= chunksz: + break + filelist.append(v) + + self._get_files_list(mfile, filelist) + + def __format_safe_read_crl(self, pth): + """CRLs seem to frequently come in DER format, so try reading + the CRL using both of the formats before giving up.""" + + with open(pth, "rb") as f: + raw = f.read() - # This returns a list of transient errors - # that occurred during the transport operation. - # An exception handler here isn't necessary - # unless we want to supress a permanant failure. - try: - errlist = d.get_files(filelist, download_dir, - progtrack, v, header, pub=pub) - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, record this for later - # and try a different host. 
- gave_up = True - errlist = ex.failures - success = ex.success - - for e in errlist: - req = getattr(e, "request", None) - if req: - failedreqs.append(req) - failures.append(e) - else: - raise e - - if gave_up: - # If the transport gave up due to excessive - # consecutive errors, the caller is returned a - # list of successful requests, and a list of - # failures. We need to consider the requests - # that were not attempted because we gave up - # early. In this situation, they're failed - # requests, even though no exception was - # returned. Filter the flist to remove the - # successful requests. Everything else failed. - failedreqs = [ - x for x in filelist - if x not in success - ] - filelist = failedreqs - elif failedreqs: - success = [ - x for x in filelist - if x not in failedreqs - ] - filelist = failedreqs - else: - success = filelist - filelist = None - - for s in success: - - dl_path = os.path.join(download_dir, s) - - try: - self._verify_content(mfile[s][0], - dl_path) - except tx.InvalidContentException as e: - mfile.subtract_progress(e.size) - e.request = s - repostats.record_error(content=True) - failedreqs.append(s) - failures.append(e) - if not filelist: - filelist = failedreqs - continue - - if cache: - cpath = cache.insert(s, dl_path) - mfile.file_done(s, cpath) - else: - mfile.file_done(s, dl_path) - - # Return if everything was successful - if not filelist and not errlist: - return - - if failedreqs and failures: - failures = [ - x for x in failures - if x.request in failedreqs - ] - tfailurex = tx.TransportFailures(pfmri=mfile.pfmri) - for f in failures: - tfailurex.append(f) - raise tfailurex - - @LockedTransport() - def _get_files(self, mfile): - """Perform an operation that gets multiple files at once. - A mfile object contains information about the multiple-file - request that will be performed.""" - - download_dir = self.cfg.incoming_root - pub = mfile.get_publisher() - - # Call setup if the transport isn't configured or was shutdown. - if not self.__engine: - self.__setup() - - # If version check hasn't been executed, run it prior to this - # operation. - self._version_check_all(ccancel=mfile.get_ccancel(), - alt_repo=mfile.get_alt_repo()) - - # Check if the download_dir exists. If it doesn't create - # the directories. - self._makedirs(download_dir) - - # Call statvfs to find the blocksize of download_dir's - # filesystem. - try: - destvfs = os.statvfs(download_dir) - # set the file buffer size to the blocksize of - # our filesystem - self.__engine.set_file_bufsz(destvfs.f_bsize) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise apx.PermissionsException(e.filename) - else: - raise tx.TransportOperationError( - "Unable to stat VFS: {0}".format(e)) - except AttributeError as e: - # os.statvfs is not available on Windows - pass - - while mfile: - - filelist = [] - chunksz = self.__chunk_size(pub, - alt_repo=mfile.get_alt_repo()) - - for i, v in enumerate(mfile): - if i >= chunksz: - break - filelist.append(v) - - self._get_files_list(mfile, filelist) - - def __format_safe_read_crl(self, pth): - """CRLs seem to frequently come in DER format, so try reading - the CRL using both of the formats before giving up.""" - - with open(pth, "rb") as f: - raw = f.read() + try: + return x509.load_pem_x509_crl(raw, default_backend()) + except ValueError: + try: + return x509.load_der_x509_crl(raw, default_backend()) + except ValueError: + raise apx.BadFileFormat( + _( + "The CRL file " "{0} is not in a recognized " "format." 
+ ).format(pth) + ) + + @LockedTransport() + def get_crl(self, uri, crl_root, more_uris=False): + """Given a URI (for now only http URIs are supported), return + the CRL object created from the file stored at that uri. + + uri: URI for a CRL. + + crl_root: file-system based crl root directory for storing + retrieved the CRL. + """ - try: - return x509.load_pem_x509_crl(raw, default_backend()) - except ValueError: - try: - return x509.load_der_x509_crl(raw, - default_backend()) - except ValueError: - raise apx.BadFileFormat(_("The CRL file " - "{0} is not in a recognized " - "format.").format(pth)) - - @LockedTransport() - def get_crl(self, uri, crl_root, more_uris=False): - """Given a URI (for now only http URIs are supported), return - the CRL object created from the file stored at that uri. - - uri: URI for a CRL. - - crl_root: file-system based crl root directory for storing - retrieved the CRL. - """ - - uri = uri.strip() - if uri.startswith("Full Name:"): - uri = uri[len("Full Name:"):] - uri = uri.strip() - if uri.startswith("URI:"): - uri = uri[4:] - if not uri.startswith("http://") and \ - not uri.startswith("file://"): - raise apx.InvalidResourceLocation(uri.strip()) - crl_host = DebugValues.get_value("crl_host") - if crl_host: - orig = urlparse(uri) - crl = urlparse(crl_host) - uri = urlunparse(ParseResult( - scheme=crl.scheme, netloc=crl.netloc, - path=orig.path, - params=orig.params, query=orig.params, - fragment=orig.fragment)) - # If we've already read the CRL, use the previously created - # object. - if uri in self.__tmp_crls: - return self.__tmp_crls[uri] - fn = quote(uri, "") - if not os.path.isdir(crl_root): - raise apx.InvalidResourceLocation(_("CRL root: {0}" - ).format(crl_root)) - - fpath = os.path.join(crl_root, fn) - crl = None - # Check if we already have a CRL for this URI. - if os.path.exists(fpath): - # If we already have a CRL that we can read, check - # whether it's time to retrieve a new one from the - # location. - try: - crl = self.__format_safe_read_crl(fpath) - except EnvironmentError: - pass - else: - nu = crl.next_update - cur_time = dt.datetime.utcnow() - - if cur_time < nu: - self.__tmp_crls[uri] = crl - return crl - - # If the CRL is already known to be unavailable, don't try - # connecting to it again. - if uri in self.__bad_crls: + uri = uri.strip() + if uri.startswith("Full Name:"): + uri = uri[len("Full Name:") :] + uri = uri.strip() + if uri.startswith("URI:"): + uri = uri[4:] + if not uri.startswith("http://") and not uri.startswith("file://"): + raise apx.InvalidResourceLocation(uri.strip()) + crl_host = DebugValues.get_value("crl_host") + if crl_host: + orig = urlparse(uri) + crl = urlparse(crl_host) + uri = urlunparse( + ParseResult( + scheme=crl.scheme, + netloc=crl.netloc, + path=orig.path, + params=orig.params, + query=orig.params, + fragment=orig.fragment, + ) + ) + # If we've already read the CRL, use the previously created + # object. + if uri in self.__tmp_crls: + return self.__tmp_crls[uri] + fn = quote(uri, "") + if not os.path.isdir(crl_root): + raise apx.InvalidResourceLocation( + _("CRL root: {0}").format(crl_root) + ) + + fpath = os.path.join(crl_root, fn) + crl = None + # Check if we already have a CRL for this URI. + if os.path.exists(fpath): + # If we already have a CRL that we can read, check + # whether it's time to retrieve a new one from the + # location. 
+ try: + crl = self.__format_safe_read_crl(fpath) + except EnvironmentError: + pass + else: + nu = crl.next_update + cur_time = dt.datetime.utcnow() + + if cur_time < nu: + self.__tmp_crls[uri] = crl + return crl + + # If the CRL is already known to be unavailable, don't try + # connecting to it again. + if uri in self.__bad_crls: + return crl + + # If no CRL already exists or it's time to try to get a new one, + # try to retrieve it from the server. + try: + tmp_fd, tmp_pth = tempfile.mkstemp(dir=crl_root) + except EnvironmentError as e: + if e.errno in (errno.EACCES, errno.EPERM): + tmp_fd, tmp_pth = tempfile.mkstemp() + else: + raise apx._convert_error(e) + + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() + + orig = urlparse(uri) + # To utilize the transport engine, we need to pretend uri for + # a crl is like a repo, because the transport engine has some + # specific bookkeeping stats keyed by repouri and proxy. + # We did this to utilize it as much as possible. + repouri = urlunparse( + ParseResult( + scheme=orig.scheme, + netloc=orig.netloc, + path="", + params="", + query="", + fragment="", + ) + ) + proxy = misc.get_runtime_proxy(None, uri) + t_repouris = _convert_repouris( + [publisher.RepositoryURI(repouri, proxy=proxy)] + ) + + retries = 2 + # We need to call get_repostats to establish the initial + # stats. + self.stats.get_repostats(t_repouris) + for i in range(retries): + self.__engine.add_url( + uri, filepath=tmp_pth, repourl=repouri, proxy=proxy + ) + try: + while self.__engine.pending: + self.__engine.run() + rf = self.__engine.check_status() + if rf: + # If there are non-retryable failure + # cases or more uris available, do not + # retry this one and add it to bad crl + # list. + if any(not f.retryable for f in rf) or more_uris: + self.__bad_crls.add(uri) return crl + # Last retry failed, also consider it as + # a bad crl. + elif i >= retries - 1: + self.__bad_crls.add(uri) + return crl + else: + break + except tx.ExcessiveTransientFailure as e: + # Since there are too many consecutive errors, + # we probably just consider it as a bad crl. + self.__bad_crls.add(uri) + # Reset the engine. + self.__engine.reset() + return crl - # If no CRL already exists or it's time to try to get a new one, - # try to retrieve it from the server. - try: - tmp_fd, tmp_pth = tempfile.mkstemp(dir=crl_root) - except EnvironmentError as e: - if e.errno in (errno.EACCES, errno.EPERM): - tmp_fd, tmp_pth = tempfile.mkstemp() - else: - raise apx._convert_error(e) - - # Call setup if the transport isn't configured or was shutdown. - if not self.__engine: - self.__setup() - - orig = urlparse(uri) - # To utilize the transport engine, we need to pretend uri for - # a crl is like a repo, because the transport engine has some - # specific bookkeeping stats keyed by repouri and proxy. - # We did this to utilize it as much as possible. - repouri = urlunparse(ParseResult(scheme=orig.scheme, - netloc=orig.netloc, path="", params="", query="", - fragment="")) - proxy = misc.get_runtime_proxy(None, uri) - t_repouris = _convert_repouris([publisher.RepositoryURI(repouri, - proxy=proxy)]) - - retries = 2 - # We need to call get_repostats to establish the initial - # stats. 
- self.stats.get_repostats(t_repouris) - for i in range(retries): - self.__engine.add_url(uri, filepath=tmp_pth, - repourl=repouri, proxy=proxy) - try: - while self.__engine.pending: - self.__engine.run() - rf = self.__engine.check_status() - if rf: - # If there are non-retryable failure - # cases or more uris available, do not - # retry this one and add it to bad crl - # list. - if any(not f.retryable for f in rf) or \ - more_uris: - self.__bad_crls.add(uri) - return crl - # Last retry failed, also consider it as - # a bad crl. - elif i >= retries - 1: - self.__bad_crls.add(uri) - return crl - else: - break - except tx.ExcessiveTransientFailure as e: - # Since there are too many consecutive errors, - # we probably just consider it as a bad crl. - self.__bad_crls.add(uri) - # Reset the engine. - self.__engine.reset() - return crl + try: + ncrl = self.__format_safe_read_crl(tmp_pth) + except apx.BadFileFormat: + portable.remove(tmp_pth) + return crl + except EnvironmentError: + # If the tmp_pth was deleted by transport engine or + # anything else about opening files, do the following. + try: + portable.remove(tmp_pth) + except EnvironmentError: + pass + return crl - try: - ncrl = self.__format_safe_read_crl(tmp_pth) - except apx.BadFileFormat: - portable.remove(tmp_pth) - return crl - except EnvironmentError: - # If the tmp_pth was deleted by transport engine or - # anything else about opening files, do the following. - try: - portable.remove(tmp_pth) - except EnvironmentError: - pass - return crl + try: + portable.rename(tmp_pth, fpath) + # Because the file was made using mkstemp, we need to + # chmod it to match the other files in var/pkg. + os.chmod(fpath, PKG_RO_FILE_MODE) + except EnvironmentError: + self.__tmp_crls[uri] = ncrl + try: + portable.remove(tmp_pth) + except EnvironmentError: + pass + return ncrl + + def get_versions(self, pub, ccancel=None, alt_repo=None): + """Query the publisher's origin servers for versions + information. Return a dictionary of "name":"versions" """ + + self._lock.acquire() + try: + v = self._get_versions(pub, ccancel=ccancel, alt_repo=alt_repo) + finally: + self._lock.release() + + return v + + def _get_versions(self, pub, ccancel=None, alt_repo=None): + """Implementation of get_versions""" + + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + failures = tx.TransportFailures() + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() + + # If version check hasn't been executed, run it prior to this + # operation. + self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) + + for d, retries in self.__gen_repo( + pub, retry_count, origin_only=True, alt_repo=alt_repo + ): + repostats = self.stats[d.get_repouri_key()] + header = Transport.__get_request_header( + header, repostats, retries, d + ) + + # If a transport exception occurs, + # save it if it's retryable, otherwise + # raise the error to a higher-level handler. + try: + vers = self.__get_version(d, header, ccancel=ccancel) + # Save this information for later use, too. 
+ self.__fill_repo_vers(d, vers) + return vers + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + for f in ex.failures: + f.url = d.get_url() + failures.append(f) + + except tx.InvalidContentException as e: + repostats.record_error(content=True) + e.reason = "Unable to parse repository's " "versions/0 response" + failures.append(e) + + except tx.TransportException as e: + e.url = d.get_url() + if e.retryable: + failures.append(e) + else: + raise + raise failures - try: - portable.rename(tmp_pth, fpath) - # Because the file was made using mkstemp, we need to - # chmod it to match the other files in var/pkg. - os.chmod(fpath, PKG_RO_FILE_MODE) - except EnvironmentError: - self.__tmp_crls[uri] = ncrl + @staticmethod + def __get_version(repo, header=None, ccancel=None): + """An internal method that returns a versions dictionary + given a transport repo object.""" + + resp = repo.get_versions(header, ccancel=ccancel) + verlines = resp.readlines() + + try: + return dict(s.split(None, 1) for s in (l.strip() for l in verlines)) + except ValueError as e: + raise tx.InvalidContentException(e) + + def __fill_repo_vers(self, repo, vers=None, ccancel=None): + """Download versions information for the transport + repository object and store that information inside + of it.""" + + # Call __get_version to get the version dictionary + # from the repo. + + if not vers: + try: + vers = self.__get_version(repo, ccancel=ccancel) + except tx.InvalidContentException: + raise tx.PkgProtoError( + repo.get_url(), + "versions", + 0, + "InvalidContentException while parsing " "response", + ) + + for key, val in vers.items(): + # Don't turn this line into a list of versions. + if key == "pkg-server": + continue + + try: + versids = [int(v) for v in val.split()] + except ValueError: + raise tx.PkgProtoError( + repo.get_url(), + "versions", + 0, + "Unable to parse version ids.", + ) + + # Insert the list back into the dictionary. + versids.sort(reverse=True) + vers[key] = versids + + repo.add_version_data(vers) + + def __gen_repo( + self, + pub, + count, + prefer_remote=False, + origin_only=False, + single_repository=False, + operation=None, + versions=None, + ccancel=None, + alt_repo=None, + ): + """An internal method that returns the list of Repo objects + for a given Publisher. Callers use this method to generate + lists of endpoints for transport operations, and to retry + operations to a single endpoint. + + The 'pub' argument is a Publisher object or RepositoryURI + object. This is used to lookup a transport.Repo object. + + The 'count' argument determines how many times the routine + will iterate through a list of endpoints. The number of times + we've iterated when calling this method is included in the + tuple that is yielded. + + 'prefer_remote' is an optional boolean value indicating whether + network-based sources are preferred over local sources. If + True, network-based origins will be returned first after the + default order criteria has been applied. This is a very + special case operation, and should not be used liberally. + + 'origin_only' returns only endpoints that are Origins. + This allows the caller to exclude mirrors from the list, + for operations that are meta-data only. + + If callers are performing a publication operation and want + to ensure that only one Repository is used as an endpoint, + 'single_repository' should be set to True. 
+ + If callers wish to only obtain repositories that support + a particular version of an operation, they should supply + the operation's name as a string to the 'operation' argument. + The 'versions' argument should contain the desired available + versions for the operation. This must be given as integers + in a list. + + If a versioned operation is requested, this routine may have + to perform network operations to complete the request. If + cancellation is desired, a cancellation object should be + passed in the 'ccancel' argument. + + By default, this routine looks at a Publisher's + repository. If the caller would like to use a + different Repository object, it should pass one in + 'alt_repo.' + + This function returns a tuple containing a Repo object and + the number of times we've iterated through the endpoints. If + versions and operation are specified, it returns a tuple of + (Repo, iteration, highest supported version). + """ + + if not self.__engine: + self.__setup() + + # If alt_repo supplied, use that as the Repository. + # Otherwise, check that a Publisher was passed, and use + # its repository. + repo = None + if alt_repo: + repo = alt_repo + elif isinstance(pub, publisher.Publisher): + repo = pub.repository + if not repo: + raise apx.NoPublisherRepositories(pub) + + if repo and origin_only: + repolist = repo.origins + origins = repo.origins + if single_repository: + assert len(repolist) == 1 + elif repo: + repolist = repo.mirrors[:] + repolist.extend(repo.origins) + repolist.extend(self.__dynamic_mirrors) + origins = repo.origins + else: + # Caller passed RepositoryURI object in as + # pub argument, repolist is the RepoURI + repolist = [pub] + origins = repolist + + repolist = _convert_repouris(repolist) + origins = _convert_repouris(origins) + + def remote_first(a, b): + # For now, any URI using the file scheme is considered + # local. Realistically, it could be an NFS mount, etc. + # However, that's a further refinement that can be done + # later. + aremote = a[0].scheme != "file" + bremote = b[0].scheme != "file" + return misc.cmp(aremote, bremote) * -1 + + if versions: + versions = sorted(versions, reverse=True) + + fail = None + iteration = 0 + for i in range(count): + iteration += 1 + rslist = self.stats.get_repostats(repolist, origins) + if prefer_remote: + rslist.sort(key=cmp_to_key(remote_first)) + + fail = tx.TransportFailures() + repo_found = False + for rs, ruri in rslist: + if operation and versions: + repo = self.__repo_cache.new_repo(rs, ruri) + if not repo.has_version_data(): try: - portable.remove(tmp_pth) - except EnvironmentError: - pass - return ncrl + self.__fill_repo_vers(repo, ccancel=ccancel) + except tx.TransportException as ex: + # Encountered a + # transport error while + # trying to contact this + # origin. Save the + # errors on each retry + # so that they can be + # raised instead of + # an unsupported + # operation error. + if isinstance(ex, tx.TransportFailures): + fail.extend(ex.exceptions) + else: + fail.append(ex) + continue - def get_versions(self, pub, ccancel=None, alt_repo=None): - """Query the publisher's origin servers for versions - information. 
Return a dictionary of "name":"versions" """ + verid = repo.supports_version(operation, versions) + if verid >= 0: + repo_found = True + yield repo, iteration, verid + else: + repo_found = True + yield self.__repo_cache.new_repo(rs, ruri), iteration + + if not repo_found and fail: + raise fail + + if not origins and isinstance(pub, publisher.Publisher): + # Special error case; no transport configuration + # available for this publisher. + raise apx.NoPublisherRepositories(pub) + + if not repo_found and operation and versions: + # If a versioned operation was requested and + # wasn't found, then raise an unsupported + # exception using the newest version allowed. + raise apx.UnsupportedRepositoryOperation( + pub, "{0}/{1:d}".format(operation, versions[-1]) + ) + + def __chunk_size(self, pub, alt_repo=None, origin_only=False): + """Determine the chunk size based upon how many of the known + mirrors have been visited. If not all mirrors have been + visited, choose a small size so that if it ends up being + a poor choice, the client doesn't transfer too much data.""" + + CHUNK_SMALL = 10 + CHUNK_LARGE = 100 + CHUNK_HUGE = 1024 + + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() + + if alt_repo: + repolist = alt_repo.origins[:] + if not origin_only: + repolist.extend(alt_repo.mirrors) + elif isinstance(pub, publisher.Publisher): + repo = pub.repository + if not repo: + raise apx.NoPublisherRepositories(pub) + repolist = repo.origins[:] + if not origin_only: + repolist.extend(repo.mirrors) + else: + # If caller passed RepositoryURI object in as + # pub argument, repolist is the RepoURI. + repolist = [pub] + + repolist = _convert_repouris(repolist) + n = len(repolist) + m = self.stats.get_num_visited(repolist) + if n == 1: + return CHUNK_HUGE + if m < n: + return CHUNK_SMALL + return CHUNK_LARGE + + @LockedTransport() + def valid_publisher_test(self, pub, ccancel=None): + """Test that the publisher supplied in pub actually + points to a valid packaging server.""" - self._lock.acquire() + try: + vd = self._get_versions(pub, ccancel=ccancel) + except tx.TransportException as e: + # Failure when contacting server. Report + # this as an error. Attempt to report + # the specific origin that failed, and + # if not available, fallback to the + # first one for the publisher. + url = getattr(e, "url", pub["origin"]) + raise apx.InvalidDepotResponseException( + url, + "Transport errors encountered when trying to " + "contact repository.\nReported the following " + "errors:\n{0}".format(e), + ) + + if not self._valid_versions_test(vd): + url = pub["origin"] + raise apx.InvalidDepotResponseException( + url, "Invalid or unparseable version information." + ) + + return True + + def _version_check_all(self, alt_repo=None, ccancel=None): + # Retrieve version info for all publishers to fill version info + # and test if repositories are responding. + + if self.__version_check_executed: + return + + self.__version_check_executed = True + + pubs = [pub for pub in self.cfg.gen_publishers()] + if not pubs and alt_repo: + # Special case -- no configured publishers exist, but + # an alternate package source was specified, so create + # a temporary publisher using alternate repository. 
+ pubs = [publisher.Publisher("temporary", repository=alt_repo)] + + fail = tx.TransportFailures() + for pub in pubs: + if pub.prefix in self.repo_status: + # This publisher has already been tested, ignore + continue + for origin in pub.repository.origins: + p = copy.copy(pub) + p.repository.origins = [origin] try: - v = self._get_versions(pub, ccancel=ccancel, - alt_repo=alt_repo) - finally: - self._lock.release() + self._version_check(p, ccancel=ccancel) + except apx.InvalidDepotResponseException as e: + # If there is a network connection issue + # with this repo ignore here. It will + # get recorded in self.repo_status. + pass + + def version_check(self, pub, ccancel=None): + """Retrieve version info from publisher and fill internal + version caches. If we encounter problems contacting the repo, + store that information for later.""" + self._lock.acquire() + try: + self._version_check(pub, ccancel=ccancel) + finally: + self._lock.release() - return v + def _version_check(self, pub, ccancel=None): + """Implementation of version check.""" - def _get_versions(self, pub, ccancel=None, alt_repo=None): - """Implementation of get_versions""" + fail = tx.TransportFailures() + vd = None - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - failures = tx.TransportFailures() - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) + if "total" in self.repo_status.setdefault(pub.prefix, {}): + self.repo_status[pub.prefix]["total"] += 1 + else: + self.repo_status[pub.prefix]["total"] = 1 - # Call setup if the transport isn't configured or was shutdown. - if not self.__engine: - self.__setup() + try: + vd = self._get_versions(pub, ccancel=ccancel) + except tx.TransportException as ex: + if isinstance(ex, tx.TransportFailures): + fail.extend(ex.exceptions) + else: + fail.append(ex) + except apx.CanceledException: + raise + + if not vd or not self._valid_versions_test(vd): + exc = apx.InvalidDepotResponseException( + pub.repository.origins[0].uri, fail + ) + # Publisher names can't start with _ so this is safe. + self.repo_status["_failures"] = None + self.repo_status[pub.prefix].setdefault("errors", set([])).add(exc) + raise exc + + @staticmethod + def _valid_versions_test(versdict): + """Check that the versions information contained in + versdict contains valid version specifications. + + In order to test for this condition, pick a publisher + from the list of active publishers. Check to see if + we can connect to it. If so, test to see if it supports + the versions/0 operation. If versions/0 is not found, + we get an unparseable response, or the response does + not contain pkg-server, or versions 0 then we're not + talking to a depot. Return an error in these cases.""" + + if "pkg-server" in versdict: + # success! + return True + elif "versions" in versdict: + try: + versids = [int(v) for v in versdict["versions"]] + except ValueError: + # Unable to determine version number. Fail. + return False - # If version check hasn't been executed, run it prior to this - # operation. - self._version_check_all(ccancel=ccancel, alt_repo=alt_repo) + if 0 not in versids: + # Paranoia. Version 0 should be in the + # output for versions/0. If we're here, + # something has gone very wrong. EPIC FAIL! + return False - for d, retries in self.__gen_repo(pub, retry_count, - origin_only=True, alt_repo=alt_repo): + # Found versions/0, success! 
+ return True - repostats = self.stats[d.get_repouri_key()] - header = Transport.__get_request_header(header, - repostats, retries, d) + # Some other error encountered. Fail. + return False - # If a transport exception occurs, - # save it if it's retryable, otherwise - # raise the error to a higher-level handler. - try: - vers = self.__get_version(d, header, - ccancel=ccancel) - # Save this information for later use, too. - self.__fill_repo_vers(d, vers) - return vers - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - for f in ex.failures: - f.url = d.get_url() - failures.append(f) - - except tx.InvalidContentException as e: - repostats.record_error(content=True) - e.reason = "Unable to parse repository's " \ - "versions/0 response" - failures.append(e) - - except tx.TransportException as e: - e.url = d.get_url() - if e.retryable: - failures.append(e) - else: - raise - raise failures - - @staticmethod - def __get_version(repo, header=None, ccancel=None): - """An internal method that returns a versions dictionary - given a transport repo object.""" - - resp = repo.get_versions(header, ccancel=ccancel) - verlines = resp.readlines() + def multi_file(self, fmri, progtrack, ccancel, alt_repo=None): + """Creates a MultiFile object for this transport. + The caller may add actions to the multifile object + and wait for the download to complete.""" - try: - return dict( - s.split(None, 1) - for s in (l.strip() for l in verlines) - ) - except ValueError as e: - raise tx.InvalidContentException(e) + if not fmri: + return None - def __fill_repo_vers(self, repo, vers=None, ccancel=None): - """Download versions information for the transport - repository object and store that information inside - of it.""" + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() - # Call __get_version to get the version dictionary - # from the repo. + if not alt_repo: + alt_repo = self.cfg.get_pkg_alt_repo(fmri) - if not vers: - try: - vers = self.__get_version(repo, ccancel=ccancel) - except tx.InvalidContentException: - raise tx.PkgProtoError(repo.get_url(), - "versions", 0, - "InvalidContentException while parsing " - "response") - - for key, val in vers.items(): - # Don't turn this line into a list of versions. - if key == "pkg-server": - continue + try: + pub = self.cfg.get_publisher(fmri.publisher) + except apx.UnknownPublisher: + # Allow publishers that don't exist in configuration + # to be used so that if data exists in the cache for + # them, the operation will still succeed. This only + # needs to be done here as multi_file_ni is only used + # for publication tools. + pub = publisher.Publisher(fmri.publisher) + + mfile = MultiFile( + pub, self, progtrack, ccancel, alt_repo=alt_repo, pfmri=fmri + ) + + return mfile + + def multi_file_ni( + self, + publisher, + final_dir, + decompress=False, + progtrack=None, + ccancel=None, + alt_repo=None, + ): + """Creates a MultiFileNI object for this transport. + The caller may add actions to the multifile object + and wait for the download to complete. + + This is used by callers who want to download files, + but not install them through actions.""" + + # Call setup if the transport isn't configured or was shutdown. 
+ if not self.__engine: + self.__setup() + + mfile = MultiFileNI( + publisher, + self, + final_dir, + decompress=decompress, + progtrack=progtrack, + ccancel=ccancel, + alt_repo=alt_repo, + ) + + return mfile + + def _action_cached(self, action, pub, in_hash=None, verify=None): + """If a file with the name action.hash is cached, and if it has + the same content hash as action.chash, then return the path to + the file. If the file can't be found, return None. + + The in_hash parameter allows an alternative hash to be used to + check if this action is cached. This is used for actions which + have more than one effective payload. + + The verify parameter specifies whether the payload of the action + should be validated if needed. The content of readonly caches + will not be validated now; package operations will validate the + content later at the time of installation or update and fail if + it is invalid.""" + + hash_attr, hash_val, hash_func = digest.get_least_preferred_hash(action) + + if in_hash: + hash_val = in_hash + + for cache in self.cfg.get_caches(pub=pub, readonly=True): + cache_path = cache.lookup(hash_val) + if not cache_path: + continue + if verify is None: + # Assume readonly caches are valid (likely a + # file:// repository). The content will be + # validated later at the time of install / + # update, so if it isn't valid here, there's + # nothing we can do anyway since it's likely the + # repository we would retrieve it from. This + # can be a significant performance improvement + # when using NFS repositories. + verify = not cache.readonly + + try: + if verify: + self._verify_content(action, cache_path) + return cache_path + except tx.InvalidContentException: + # If the content in the cache doesn't match the + # hash of the action, verify will have already + # purged the item from the cache. + pass + return None + + @staticmethod + def _make_opener(cache_path): + if cache_path is None: + return + + def opener(): + f = open(cache_path, "rb") + return f + + return opener + + def action_cached(self, fmri, action): + """If a file with the name action.hash is cached, and if it has + the same content hash as action.chash, then return the path to + the file. If the file can't be found, return None. + + 'fmri' is a FMRI object for the package that delivers the + action. + + 'action' is an action object to retrieve the cache file path + for.""" - try: - versids = [ - int(v) - for v in val.split() - ] - except ValueError: - raise tx.PkgProtoError(repo.get_url(), - "versions", 0, - "Unable to parse version ids.") - - # Insert the list back into the dictionary. - versids.sort(reverse=True) - vers[key] = versids - - repo.add_version_data(vers) - - def __gen_repo(self, pub, count, prefer_remote=False, origin_only=False, - single_repository=False, operation=None, versions=None, - ccancel=None, alt_repo=None): - """An internal method that returns the list of Repo objects - for a given Publisher. Callers use this method to generate - lists of endpoints for transport operations, and to retry - operations to a single endpoint. - - The 'pub' argument is a Publisher object or RepositoryURI - object. This is used to lookup a transport.Repo object. - - The 'count' argument determines how many times the routine - will iterate through a list of endpoints. The number of times - we've iterated when calling this method is included in the - tuple that is yielded. - - 'prefer_remote' is an optional boolean value indicating whether - network-based sources are preferred over local sources. 
If - True, network-based origins will be returned first after the - default order criteria has been applied. This is a very - special case operation, and should not be used liberally. - - 'origin_only' returns only endpoints that are Origins. - This allows the caller to exclude mirrors from the list, - for operations that are meta-data only. - - If callers are performing a publication operation and want - to ensure that only one Repository is used as an endpoint, - 'single_repository' should be set to True. - - If callers wish to only obtain repositories that support - a particular version of an operation, they should supply - the operation's name as a string to the 'operation' argument. - The 'versions' argument should contain the desired available - versions for the operation. This must be given as integers - in a list. - - If a versioned operation is requested, this routine may have - to perform network operations to complete the request. If - cancellation is desired, a cancellation object should be - passed in the 'ccancel' argument. - - By default, this routine looks at a Publisher's - repository. If the caller would like to use a - different Repository object, it should pass one in - 'alt_repo.' - - This function returns a tuple containing a Repo object and - the number of times we've iterated through the endpoints. If - versions and operation are specified, it returns a tuple of - (Repo, iteration, highest supported version). - """ - - if not self.__engine: - self.__setup() - - # If alt_repo supplied, use that as the Repository. - # Otherwise, check that a Publisher was passed, and use - # its repository. - repo = None - if alt_repo: - repo = alt_repo - elif isinstance(pub, publisher.Publisher): - repo = pub.repository - if not repo: - raise apx.NoPublisherRepositories(pub) - - if repo and origin_only: - repolist = repo.origins - origins = repo.origins - if single_repository: - assert len(repolist) == 1 - elif repo: - repolist = repo.mirrors[:] - repolist.extend(repo.origins) - repolist.extend(self.__dynamic_mirrors) - origins = repo.origins + try: + pub = self.cfg.get_publisher(fmri.publisher) + except apx.UnknownPublisher: + # Allow publishers that don't exist in configuration + # to be used so that if data exists in the cache for + # them, the operation will still succeed. This only + # needs to be done here as multi_file_ni is only used + # for publication tools. + pub = publisher.Publisher(fmri.publisher) + + # cache content has already been verified + return self._make_opener(self._action_cached(action, pub, verify=False)) + + def _verify_content(self, action, filepath): + """If action contains an attribute that has the compressed + hash, read the file specified in filepath and verify + that the hash values match. If the values do not match, + remove the file and raise an InvalidContentException.""" + + chash_attr, chash, chash_func = digest.get_preferred_hash( + action, hash_type=digest.CHASH + ) + if action.name == "signature": + # + # If we're checking a signature action and the filepath + # parameter points to one of the chain certificates, we + # need to verify against the most-preferred + # [pkg.]chain.chash[.] attribute that corresponds + # to the filepath we're looking at. We determine the + # index of the least-preferred chain hash that matches + # our filename, and use the most-preferred chash to + # verify against. + # + # i.e. 
if we have attributes: + # chain="a.a b.b c.c" + # chain.chash="aa bb cc" \ + # pkg.chain.chashes.sha512t_256="AA BB CC" + # + # and we're looking at file "b.b" then we must compare + # our computed value against the "BB" chash. + # + name = os.path.basename(filepath) + found = False + assert len(action.get_chain_certs(least_preferred=True)) == len( + action.get_chain_certs_chashes() + ) + for n, c in zip( + action.get_chain_certs(least_preferred=True), + action.get_chain_certs_chashes(), + ): + if name == n: + found = True + chash = c + break + path = action.attrs.get("path", None) + if not chash: + # Compressed hash doesn't exist. Decompress and + # generate hash of uncompressed content. + ifile = open(filepath, "rb") + ofile = open(os.devnull, "wb") + + try: + hash_attr, hash_val, hash_func = digest.get_preferred_hash( + action, hash_type=digest.HASH + ) + fhash = misc.gunzip_from_stream( + ifile, ofile, hash_func=hash_func + ) + except zlib.error as e: + s = os.stat(filepath) + portable.remove(filepath) + raise tx.InvalidContentException( + path, + "zlib.error:{0}".format(" ".join([str(a) for a in e.args])), + size=s.st_size, + ) + + ifile.close() + ofile.close() + + if hash_val != fhash: + s = os.stat(filepath) + portable.remove(filepath) + raise tx.InvalidContentException( + action.path, + "hash failure: expected: {0}" + " computed: {1}".format(hash, fhash), + size=s.st_size, + ) + return + + newhash = misc.get_data_digest(filepath, hash_func=chash_func)[0] + if chash != newhash: + s = os.stat(filepath) + # Check whether we're using the path as a part of the + # content cache, or whether we're actually looking at a + # file:// repository. It's safe to remove the corrupted + # file only if it is part of a cache. Otherwise, + # "pkgrepo verify/fix" should be used to check + # repositories. + cache_fms = self.cfg.get_caches(readonly=False) + remove_content = False + for fm in cache_fms: + if filepath.startswith(fm.root): + remove_content = True + if remove_content: + portable.remove(filepath) + raise tx.InvalidContentException( + path, + "chash failure: expected: {0} computed: {1}".format( + chash, newhash + ), + size=s.st_size, + ) + + @LockedTransport() + def publish_add( + self, pub, action=None, ccancel=None, progtrack=None, trans_id=None + ): + """Perform the 'add' publication operation to the publisher + supplied in pub. The transaction-id is passed in trans_id.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + if progtrack and ccancel: + progtrack.check_cancelation = ccancel + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="add", + versions=[0], + ): + try: + d.publish_add( + action, + header=header, + progtrack=progtrack, + trans_id=trans_id, + ) + return + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) else: - # Caller passed RepositoryURI object in as - # pub argument, repolist is the RepoURI - repolist = [pub] - origins = repolist - - repolist = _convert_repouris(repolist) - origins = _convert_repouris(origins) - - def remote_first(a, b): - # For now, any URI using the file scheme is considered - # local. 
Realistically, it could be an NFS mount, etc. - # However, that's a further refinement that can be done - # later. - aremote = a[0].scheme != "file" - bremote = b[0].scheme != "file" - return misc.cmp(aremote, bremote) * -1 - - if versions: - versions = sorted(versions, reverse=True) - - fail = None - iteration = 0 - for i in range(count): - iteration += 1 - rslist = self.stats.get_repostats(repolist, origins) - if prefer_remote: - rslist.sort(key=cmp_to_key(remote_first)) - - fail = tx.TransportFailures() - repo_found = False - for rs, ruri in rslist: - if operation and versions: - repo = self.__repo_cache.new_repo(rs, - ruri) - if not repo.has_version_data(): - try: - self.__fill_repo_vers( - repo, - ccancel=ccancel) - except tx.TransportException as ex: - # Encountered a - # transport error while - # trying to contact this - # origin. Save the - # errors on each retry - # so that they can be - # raised instead of - # an unsupported - # operation error. - if isinstance(ex, - tx.TransportFailures): - fail.extend( - ex.exceptions) - else: - fail.append(ex) - continue - - verid = repo.supports_version(operation, - versions) - if verid >= 0: - repo_found = True - yield repo, iteration, verid - else: - repo_found = True - yield self.__repo_cache.new_repo(rs, - ruri), iteration - - if not repo_found and fail: - raise fail - - if not origins and \ - isinstance(pub, publisher.Publisher): - # Special error case; no transport configuration - # available for this publisher. - raise apx.NoPublisherRepositories(pub) - - if not repo_found and operation and versions: - # If a versioned operation was requested and - # wasn't found, then raise an unsupported - # exception using the newest version allowed. - raise apx.UnsupportedRepositoryOperation(pub, - "{0}/{1:d}".format(operation, versions[-1])) - - def __chunk_size(self, pub, alt_repo=None, origin_only=False): - """Determine the chunk size based upon how many of the known - mirrors have been visited. If not all mirrors have been - visited, choose a small size so that if it ends up being - a poor choice, the client doesn't transfer too much data.""" - - CHUNK_SMALL = 10 - CHUNK_LARGE = 100 - CHUNK_HUGE = 1024 - - # Call setup if the transport isn't configured or was shutdown. - if not self.__engine: - self.__setup() - - if alt_repo: - repolist = alt_repo.origins[:] - if not origin_only: - repolist.extend(alt_repo.mirrors) - elif isinstance(pub, publisher.Publisher): - repo = pub.repository - if not repo: - raise apx.NoPublisherRepositories(pub) - repolist = repo.origins[:] - if not origin_only: - repolist.extend(repo.mirrors) + raise + + raise failures + + @LockedTransport() + def publish_add_file( + self, pub, pth, trans_id=None, basename=None, progtrack=None + ): + """Perform the 'add_file' publication operation to the publisher + supplied in pub. The caller should include the path in the + pth argument. The transaction-id is passed in trans_id.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + # Call setup if the transport isn't configured or was shutdown. 
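The publish_* methods being reformatted in this hunk all share the retry shape visible in publish_add above: walk the endpoints produced by __gen_repo, collect retryable transport errors into a TransportFailures aggregate (ExcessiveTransientFailure contributes its bundled failures the same way), re-raise anything non-retryable immediately, and raise the aggregate once every endpoint has been tried. A minimal self-contained sketch of that control flow, with stand-in exception classes rather than pkg.client.transport.exception:

class TransportError(Exception):
    """Stand-in for tx.TransportException."""
    retryable = False

class TransientError(TransportError):
    """Stand-in for a retryable transport error."""
    retryable = True

class Failures(Exception):
    """Stand-in for tx.TransportFailures: a bundle of retryable errors."""
    def __init__(self):
        super().__init__()
        self.exceptions = []
    def append(self, exc):
        self.exceptions.append(exc)

def publish_with_retries(endpoints, do_publish):
    """Try do_publish against each endpoint, mirroring the publish_* flow."""
    failures = Failures()
    for endpoint in endpoints:
        try:
            return do_publish(endpoint)
        except TransportError as e:
            if e.retryable:
                failures.append(e)   # remember it and try the next endpoint
            else:
                raise                # non-retryable errors surface at once
    raise failures                   # every endpoint failed transiently

# First endpoint fails transiently, the second one succeeds.
def attempt(url):
    if "mirror" in url:
        raise TransientError("connection reset")
    return "Trans-1"

print(publish_with_retries(["https://mirror.example",
                            "https://origin.example"], attempt))   # Trans-1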
+ if not self.__engine: + self.__setup() + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="file", + versions=[1], + ): + try: + d.publish_add_file( + pth, + header=header, + trans_id=trans_id, + basename=basename, + progtrack=progtrack, + ) + return + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) else: - # If caller passed RepositoryURI object in as - # pub argument, repolist is the RepoURI. - repolist = [pub] - - repolist = _convert_repouris(repolist) - n = len(repolist) - m = self.stats.get_num_visited(repolist) - if n == 1: - return CHUNK_HUGE - if m < n: - return CHUNK_SMALL - return CHUNK_LARGE - - @LockedTransport() - def valid_publisher_test(self, pub, ccancel=None): - """Test that the publisher supplied in pub actually - points to a valid packaging server.""" - - try: - vd = self._get_versions(pub, ccancel=ccancel) - except tx.TransportException as e: - # Failure when contacting server. Report - # this as an error. Attempt to report - # the specific origin that failed, and - # if not available, fallback to the - # first one for the publisher. - url = getattr(e, "url", pub["origin"]) - raise apx.InvalidDepotResponseException(url, - "Transport errors encountered when trying to " - "contact repository.\nReported the following " - "errors:\n{0}".format(e)) - - if not self._valid_versions_test(vd): - url = pub["origin"] - raise apx.InvalidDepotResponseException(url, - "Invalid or unparseable version information.") - - return True - - def _version_check_all(self, alt_repo=None, ccancel=None): - # Retrieve version info for all publishers to fill version info - # and test if repositories are responding. - - if self.__version_check_executed: - return - - self.__version_check_executed = True - - pubs = [pub for pub in self.cfg.gen_publishers()] - if not pubs and alt_repo: - # Special case -- no configured publishers exist, but - # an alternate package source was specified, so create - # a temporary publisher using alternate repository. - pubs = [publisher.Publisher("temporary", - repository=alt_repo)] - - fail = tx.TransportFailures() - for pub in pubs: - if pub.prefix in self.repo_status: - # This publisher has already been tested, ignore - continue - for origin in pub.repository.origins: - p = copy.copy(pub) - p.repository.origins = [origin] - try: - self._version_check(p, ccancel=ccancel) - except apx.InvalidDepotResponseException as e: - # If there is a network connection issue - # with this repo ignore here. It will - # get recorded in self.repo_status. - pass - - def version_check(self, pub, ccancel=None): - """Retrieve version info from publisher and fill internal - version caches. 
If we encounter problems contacting the repo, - store that information for later.""" - self._lock.acquire() - try: - self._version_check(pub, ccancel=ccancel) - finally: - self._lock.release() - - def _version_check(self, pub, ccancel=None): - """Implementation of version check.""" - - fail = tx.TransportFailures() - vd = None - - if "total" in self.repo_status.setdefault(pub.prefix, {}): - self.repo_status[pub.prefix]["total"] += 1 + raise + + raise failures + + @LockedTransport() + def publish_add_manifest(self, pub, pth, trans_id=None): + """Perform the 'add_manifest' publication operation to the publisher + supplied in pub. The caller should include the path in the + pth argument. The transaction-id is passed in trans_id.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header(uuid=self.__get_uuid(pub)) + + # Call setup if the transport isn't configured or was shutdown. + if not self.__engine: + self.__setup() + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="manifest", + versions=[1], + ): + try: + d.publish_add_manifest(pth, header=header, trans_id=trans_id) + return + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def publish_abandon(self, pub, trans_id=None): + """Perform an 'abandon' publication operation to the + publisher supplied in the pub argument. The caller should + also include the transaction id in trans_id.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="abandon", + versions=[0], + ): + try: + state, fmri = d.publish_abandon( + header=header, trans_id=trans_id + ) + return state, fmri + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def publish_close( + self, pub, trans_id=None, refresh_index=False, add_to_catalog=False + ): + """Perform a 'close' publication operation to the + publisher supplied in the pub argument. The caller should + also include the transaction id in trans_id. If add_to_catalog + is true, the pkg will be added to the catalog once + the transactions close. 
Not all transport methods + recognize this parameter.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="close", + versions=[0], + ): + try: + state, fmri = d.publish_close( + header=header, + trans_id=trans_id, + add_to_catalog=add_to_catalog, + ) + return state, fmri + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def publish_open(self, pub, client_release=None, pkg_name=None): + """Perform an 'open' transaction to start a publication + transaction to the publisher named in pub. The caller should + supply the client's OS release in client_release, and the + package's name in pkg_name.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="open", + versions=[0], + ): + try: + trans_id = d.publish_open( + header=header, + client_release=client_release, + pkg_name=pkg_name, + ) + return trans_id + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + raise failures + + @LockedTransport() + def publish_rebuild(self, pub): + """Instructs the repositories named by Publisher pub + to rebuild package and search data.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="admin", + versions=[0], + ): + try: + d.publish_rebuild(header=header, pub=pub) + return + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) else: - self.repo_status[pub.prefix]["total"] = 1 + raise + + raise failures + + @LockedTransport() + def publish_append(self, pub, client_release=None, pkg_name=None): + """Perform an 'append' transaction to start a publication + transaction to the publisher named in pub. The caller should + supply the client's OS release in client_release, and the + package's name in pkg_name.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + # Call setup if transport isn't configured, or was shutdown. 
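Each of these loops drives __gen_repo the same way: origin_only and single_repository narrow the candidates to one origin, and when an operation name plus a version list are supplied the generator yields (repo, iteration, highest-supported-version) tuples, raising an unsupported-operation error only if no endpoint speaks any requested version. A simplified, self-contained sketch of that contract; gen_endpoints and its data structures are illustrative, not the pkg API:

class UnsupportedOperation(Exception):
    pass

def gen_endpoints(origins, count, operation=None, versions=None):
    """Yield (origin, iteration[, version]) like a simplified __gen_repo."""
    if versions:
        versions = sorted(versions, reverse=True)
    found = False
    for iteration in range(1, count + 1):
        for origin in origins:
            if operation and versions:
                supported = origin["versions"].get(operation, [])
                # Highest requested version this origin also advertises.
                match = next((v for v in versions if v in supported), None)
                if match is None:
                    continue
                found = True
                yield origin, iteration, match
            else:
                found = True
                yield origin, iteration
    if not found and operation and versions:
        raise UnsupportedOperation("{0}/{1:d}".format(operation, versions[-1]))

origins = [{"url": "https://pkg.example", "versions": {"add": [0], "file": [1]}}]
for origin, i, v in gen_endpoints(origins, 1, operation="file", versions=[1]):
    print(origin["url"], "iteration", i, "version", v)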
+ if not self.__engine: + self.__setup() + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="append", + versions=[0], + ): + try: + trans_id = d.publish_append( + header=header, + client_release=client_release, + pkg_name=pkg_name, + ) + return trans_id + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def publish_rebuild_indexes(self, pub): + """Instructs the repositories named by Publisher pub + to rebuild their search indexes.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="admin", + versions=[0], + ): + try: + d.publish_rebuild_indexes(header=header, pub=pub) + return + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def publish_rebuild_packages(self, pub): + """Instructs the repositories named by Publisher pub + to rebuild package data.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="admin", + versions=[0], + ): + try: + d.publish_rebuild_packages(header=header, pub=pub) + return + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def publish_refresh(self, pub): + """Instructs the repositories named by Publisher pub + to refresh package and search data.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="admin", + versions=[0], + ): + try: + d.publish_refresh(header=header, pub=pub) + return + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def publish_refresh_indexes(self, pub): + """Instructs the repositories named by Publisher pub + to refresh their search indexes.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), 
variant=self.__get_variant(pub) + ) + + # In this case, the operation and versions keywords are + # purposefully avoided as the underlying repo function + # will automatically determine what operation to use + # for the single origin returned by __gen_repo. + for d, retries in self.__gen_repo( + pub, retry_count, origin_only=True, single_repository=True + ): + try: + d.publish_refresh_indexes(header=header, pub=pub) + return + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise + + raise failures + + @LockedTransport() + def publish_refresh_packages(self, pub): + """Instructs the repositories named by Publisher pub + to refresh package data.""" + + failures = tx.TransportFailures() + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + header = self.__build_header( + uuid=self.__get_uuid(pub), variant=self.__get_variant(pub) + ) + + for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation="admin", + versions=[0], + ): + try: + d.publish_refresh_packages(header=header, pub=pub) + return + except tx.ExcessiveTransientFailure as ex: + # If an endpoint experienced so many failures + # that we just gave up, grab the list of + # failures that it contains + failures.extend(ex.failures) + except tx.TransportException as e: + if e.retryable: + failures.append(e) + else: + raise - try: - vd = self._get_versions(pub, ccancel=ccancel) - except tx.TransportException as ex: - if isinstance(ex, tx.TransportFailures): - fail.extend(ex.exceptions) - else: - fail.append(ex) - except apx.CanceledException: - raise - - if not vd or not self._valid_versions_test(vd): - exc = apx.InvalidDepotResponseException( - pub.repository.origins[0].uri, fail) - # Publisher names can't start with _ so this is safe. - self.repo_status["_failures"] = None - self.repo_status[pub.prefix].setdefault( - "errors", set([])).add(exc) - raise exc - - @staticmethod - def _valid_versions_test(versdict): - """Check that the versions information contained in - versdict contains valid version specifications. - - In order to test for this condition, pick a publisher - from the list of active publishers. Check to see if - we can connect to it. If so, test to see if it supports - the versions/0 operation. If versions/0 is not found, - we get an unparseable response, or the response does - not contain pkg-server, or versions 0 then we're not - talking to a depot. Return an error in these cases.""" - - if "pkg-server" in versdict: - # success! - return True - elif "versions" in versdict: - try: - versids = [ - int(v) - for v in versdict["versions"] - ] - except ValueError: - # Unable to determine version number. Fail. - return False - - if 0 not in versids: - # Paranoia. Version 0 should be in the - # output for versions/0. If we're here, - # something has gone very wrong. EPIC FAIL! - return False - - # Found versions/0, success! - return True - - # Some other error encountered. Fail. - return False + raise failures - def multi_file(self, fmri, progtrack, ccancel, alt_repo=None): - """Creates a MultiFile object for this transport. 
- The caller may add actions to the multifile object - and wait for the download to complete.""" + def publish_cache_repository(self, pub, repo): + """If the caller needs to override the underlying Repository + object kept by the transport, it should use this method + to replace the cached Repository object.""" - if not fmri: - return None + assert isinstance(pub, publisher.Publisher) - # Call setup if the transport isn't configured or was shutdown. - if not self.__engine: - self.__setup() + if not self.__engine: + self.__setup() - if not alt_repo: - alt_repo = self.cfg.get_pkg_alt_repo(fmri) + origins = _convert_repouris([pub.repository.origins[0]]) + rslist = self.stats.get_repostats(origins, origins) + rs, ruri = rslist[0] - try: - pub = self.cfg.get_publisher(fmri.publisher) - except apx.UnknownPublisher: - # Allow publishers that don't exist in configuration - # to be used so that if data exists in the cache for - # them, the operation will still succeed. This only - # needs to be done here as multi_file_ni is only used - # for publication tools. - pub = publisher.Publisher(fmri.publisher) - - mfile = MultiFile(pub, self, progtrack, ccancel, - alt_repo=alt_repo, pfmri=fmri) - - return mfile - - def multi_file_ni(self, publisher, final_dir, decompress=False, - progtrack=None, ccancel=None, alt_repo=None): - """Creates a MultiFileNI object for this transport. - The caller may add actions to the multifile object - and wait for the download to complete. - - This is used by callers who want to download files, - but not install them through actions.""" - - # Call setup if the transport isn't configured or was shutdown. - if not self.__engine: - self.__setup() - - mfile = MultiFileNI(publisher, self, final_dir, - decompress=decompress, progtrack=progtrack, ccancel=ccancel, - alt_repo=alt_repo) - - return mfile - - def _action_cached(self, action, pub, in_hash=None, verify=None): - """If a file with the name action.hash is cached, and if it has - the same content hash as action.chash, then return the path to - the file. If the file can't be found, return None. - - The in_hash parameter allows an alternative hash to be used to - check if this action is cached. This is used for actions which - have more than one effective payload. - - The verify parameter specifies whether the payload of the action - should be validated if needed. The content of readonly caches - will not be validated now; package operations will validate the - content later at the time of installation or update and fail if - it is invalid.""" - - hash_attr, hash_val, hash_func = \ - digest.get_least_preferred_hash(action) - - if in_hash: - hash_val = in_hash - - for cache in self.cfg.get_caches(pub=pub, readonly=True): - cache_path = cache.lookup(hash_val) - if not cache_path: - continue - if verify is None: - # Assume readonly caches are valid (likely a - # file:// repository). The content will be - # validated later at the time of install / - # update, so if it isn't valid here, there's - # nothing we can do anyway since it's likely the - # repository we would retrieve it from. This - # can be a significant performance improvement - # when using NFS repositories. - verify = not cache.readonly + self.__repo_cache.update_repo(rs, ruri, repo) - try: - if verify: - self._verify_content(action, cache_path) - return cache_path - except tx.InvalidContentException: - # If the content in the cache doesn't match the - # hash of the action, verify will have already - # purged the item from the cache. 
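The _action_cached logic around this hunk boils down to: look the payload hash up in each cache, trust read-only caches (their content is validated later at install or update time), verify writable caches, and treat a failed verification as a cache miss. A self-contained sketch of that decision, using plain directories and hashlib as stand-ins for the pkg cache and digest modules:

import hashlib
import os

def lookup_cached(caches, hash_val, verify=None):
    """Return the path of a cached file whose digest matches hash_val.

    'caches' is a list of (root_dir, readonly) tuples; read-only caches
    are trusted unless the caller explicitly requests verification.
    """
    for root, readonly in caches:
        path = os.path.join(root, hash_val)
        if not os.path.isfile(path):
            continue
        check = (not readonly) if verify is None else verify
        if not check:
            return path
        with open(path, "rb") as f:
            if hashlib.sha1(f.read()).hexdigest() == hash_val:
                return path
        # Digest mismatch: treat it as a miss and keep looking.
    return None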
- pass - return None - - @staticmethod - def _make_opener(cache_path): - if cache_path is None: - return - def opener(): - f = open(cache_path, "rb") - return f - return opener - - def action_cached(self, fmri, action): - """If a file with the name action.hash is cached, and if it has - the same content hash as action.chash, then return the path to - the file. If the file can't be found, return None. - - 'fmri' is a FMRI object for the package that delivers the - action. - - 'action' is an action object to retrieve the cache file path - for.""" + def publish_cache_contains(self, pub): + """Returns true if the publisher's origin is cached + in the repo cache.""" - try: - pub = self.cfg.get_publisher(fmri.publisher) - except apx.UnknownPublisher: - # Allow publishers that don't exist in configuration - # to be used so that if data exists in the cache for - # them, the operation will still succeed. This only - # needs to be done here as multi_file_ni is only used - # for publication tools. - pub = publisher.Publisher(fmri.publisher) - - # cache content has already been verified - return self._make_opener(self._action_cached(action, pub, - verify=False)) - - def _verify_content(self, action, filepath): - """If action contains an attribute that has the compressed - hash, read the file specified in filepath and verify - that the hash values match. If the values do not match, - remove the file and raise an InvalidContentException.""" - - chash_attr, chash, chash_func = digest.get_preferred_hash( - action, hash_type=digest.CHASH) - if action.name == "signature": - # - # If we're checking a signature action and the filepath - # parameter points to one of the chain certificates, we - # need to verify against the most-preferred - # [pkg.]chain.chash[.] attribute that corresponds - # to the filepath we're looking at. We determine the - # index of the least-preferred chain hash that matches - # our filename, and use the most-preferred chash to - # verify against. - # - # i.e. if we have attributes: - # chain="a.a b.b c.c" - # chain.chash="aa bb cc" \ - # pkg.chain.chashes.sha512t_256="AA BB CC" - # - # and we're looking at file "b.b" then we must compare - # our computed value against the "BB" chash. - # - name = os.path.basename(filepath) - found = False - assert len(action.get_chain_certs( - least_preferred=True)) == \ - len(action.get_chain_certs_chashes()) - for n, c in zip( - action.get_chain_certs(least_preferred=True), - action.get_chain_certs_chashes()): - if name == n: - found = True - chash = c - break - path = action.attrs.get("path", None) - if not chash: - # Compressed hash doesn't exist. Decompress and - # generate hash of uncompressed content. 
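When an action carries no compressed-content hash, _verify_content falls back to decompressing the cached file and hashing the uncompressed stream, which is what the misc.gunzip_from_stream call does for it. A rough stand-alone equivalent using only gzip and hashlib; the hash choice and buffer size here are illustrative:

import gzip
import hashlib

def hash_uncompressed(path, hash_name="sha1", bufsize=64 * 1024):
    """Decompress a gzip file and return the hex digest of its contents."""
    h = hashlib.new(hash_name)
    with gzip.open(path, "rb") as f:
        while True:
            chunk = f.read(bufsize)
            if not chunk:
                break
            h.update(chunk)
    return h.hexdigest()

# The caller would then compare the result against the hash recorded on the
# action and raise InvalidContentException on a mismatch.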
- ifile = open(filepath, "rb") - ofile = open(os.devnull, "wb") + if not self.__engine: + self.__setup() - try: - hash_attr, hash_val, hash_func = \ - digest.get_preferred_hash(action, - hash_type=digest.HASH) - fhash = misc.gunzip_from_stream(ifile, ofile, - hash_func=hash_func) - except zlib.error as e: - s = os.stat(filepath) - portable.remove(filepath) - raise tx.InvalidContentException(path, - "zlib.error:{0}".format( - " ".join([str(a) for a in e.args])), - size=s.st_size) - - ifile.close() - ofile.close() - - if hash_val != fhash: - s = os.stat(filepath) - portable.remove(filepath) - raise tx.InvalidContentException(action.path, - "hash failure: expected: {0}" - " computed: {1}".format(hash, fhash), - size=s.st_size) - return - - newhash = misc.get_data_digest(filepath, - hash_func=chash_func)[0] - if chash != newhash: - s = os.stat(filepath) - # Check whether we're using the path as a part of the - # content cache, or whether we're actually looking at a - # file:// repository. It's safe to remove the corrupted - # file only if it is part of a cache. Otherwise, - # "pkgrepo verify/fix" should be used to check - # repositories. - cache_fms = self.cfg.get_caches(readonly=False) - remove_content = False - for fm in cache_fms: - if filepath.startswith(fm.root): - remove_content = True - if remove_content: - portable.remove(filepath) - raise tx.InvalidContentException(path, - "chash failure: expected: {0} computed: {1}".format( - chash, newhash), size=s.st_size) - - @LockedTransport() - def publish_add(self, pub, action=None, ccancel=None, progtrack=None, - trans_id=None): - """Perform the 'add' publication operation to the publisher - supplied in pub. The transaction-id is passed in trans_id.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - if progtrack and ccancel: - progtrack.check_cancelation = ccancel - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, operation="add", - versions=[0]): - try: - d.publish_add(action, header=header, - progtrack=progtrack, trans_id=trans_id) - return - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_add_file(self, pub, pth, trans_id=None, basename=None, - progtrack=None): - """Perform the 'add_file' publication operation to the publisher - supplied in pub. The caller should include the path in the - pth argument. The transaction-id is passed in trans_id.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - # Call setup if the transport isn't configured or was shutdown. 
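The chash-mismatch branch of _verify_content is deliberately careful about deletion: a corrupted file is removed only when it lives under one of the transport's writable cache roots, because a mismatch inside a file:// repository should be repaired with pkgrepo verify/fix rather than by the client. A small sketch of that guard; cache_roots is an illustrative stand-in for cfg.get_caches(readonly=False):

import os

def remove_if_cached(filepath, cache_roots):
    """Delete filepath only when it lies under one of the cache roots."""
    if any(filepath.startswith(root) for root in cache_roots):
        os.remove(filepath)
        return True
    return False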
- if not self.__engine: - self.__setup() - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, operation="file", - versions=[1]): - try: - d.publish_add_file(pth, header=header, - trans_id=trans_id, basename=basename, - progtrack=progtrack) - return - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_add_manifest(self, pub, pth, trans_id=None): - """Perform the 'add_manifest' publication operation to the publisher - supplied in pub. The caller should include the path in the - pth argument. The transaction-id is passed in trans_id.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub)) - - # Call setup if the transport isn't configured or was shutdown. - if not self.__engine: - self.__setup() - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, - operation="manifest", versions=[1]): - try: - d.publish_add_manifest(pth, header=header, - trans_id=trans_id) - return - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_abandon(self, pub, trans_id=None): - """Perform an 'abandon' publication operation to the - publisher supplied in the pub argument. The caller should - also include the transaction id in trans_id.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, - operation="abandon", versions=[0]): - try: - state, fmri = d.publish_abandon(header=header, - trans_id=trans_id) - return state, fmri - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_close(self, pub, trans_id=None, refresh_index=False, - add_to_catalog=False): - """Perform a 'close' publication operation to the - publisher supplied in the pub argument. The caller should - also include the transaction id in trans_id. If add_to_catalog - is true, the pkg will be added to the catalog once - the transactions close. 
Not all transport methods - recognize this parameter.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, operation="close", - versions=[0]): - try: - state, fmri = d.publish_close(header=header, - trans_id=trans_id, - add_to_catalog=add_to_catalog) - return state, fmri - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_open(self, pub, client_release=None, pkg_name=None): - """Perform an 'open' transaction to start a publication - transaction to the publisher named in pub. The caller should - supply the client's OS release in client_release, and the - package's name in pkg_name.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, operation="open", - versions=[0]): - try: - trans_id = d.publish_open(header=header, - client_release=client_release, - pkg_name=pkg_name) - return trans_id - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - raise failures - - @LockedTransport() - def publish_rebuild(self, pub): - """Instructs the repositories named by Publisher pub - to rebuild package and search data.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, operation="admin", - versions=[0]): - try: - d.publish_rebuild(header=header, pub=pub) - return - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_append(self, pub, client_release=None, pkg_name=None): - """Perform an 'append' transaction to start a publication - transaction to the publisher named in pub. The caller should - supply the client's OS release in client_release, and the - package's name in pkg_name.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - # Call setup if transport isn't configured, or was shutdown. 
- if not self.__engine: - self.__setup() - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, - operation="append", versions=[0]): - try: - trans_id = d.publish_append(header=header, - client_release=client_release, - pkg_name=pkg_name) - return trans_id - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_rebuild_indexes(self, pub): - """Instructs the repositories named by Publisher pub - to rebuild their search indexes.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, operation="admin", - versions=[0]): - try: - d.publish_rebuild_indexes(header=header, - pub=pub) - return - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_rebuild_packages(self, pub): - """Instructs the repositories named by Publisher pub - to rebuild package data.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, operation="admin", - versions=[0]): - try: - d.publish_rebuild_packages(header=header, - pub=pub) - return - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_refresh(self, pub): - """Instructs the repositories named by Publisher pub - to refresh package and search data.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, operation="admin", - versions=[0]): - try: - d.publish_refresh(header=header, pub=pub) - return - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_refresh_indexes(self, pub): - """Instructs the repositories named by Publisher pub - to refresh their search indexes.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - # In this case, the operation 
and versions keywords are - # purposefully avoided as the underlying repo function - # will automatically determine what operation to use - # for the single origin returned by __gen_repo. - for d, retries in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True): - try: - d.publish_refresh_indexes(header=header, - pub=pub) - return - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - @LockedTransport() - def publish_refresh_packages(self, pub): - """Instructs the repositories named by Publisher pub - to refresh package data.""" - - failures = tx.TransportFailures() - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - header = self.__build_header(uuid=self.__get_uuid(pub), - variant=self.__get_variant(pub)) - - for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, operation="admin", - versions=[0]): - try: - d.publish_refresh_packages(header=header, - pub=pub) - return - except tx.ExcessiveTransientFailure as ex: - # If an endpoint experienced so many failures - # that we just gave up, grab the list of - # failures that it contains - failures.extend(ex.failures) - except tx.TransportException as e: - if e.retryable: - failures.append(e) - else: - raise - - raise failures - - def publish_cache_repository(self, pub, repo): - """If the caller needs to override the underlying Repository - object kept by the transport, it should use this method - to replace the cached Repository object.""" - - assert(isinstance(pub, publisher.Publisher)) - - if not self.__engine: - self.__setup() - - origins = _convert_repouris([pub.repository.origins[0]]) - rslist = self.stats.get_repostats(origins, origins) - rs, ruri = rslist[0] - - self.__repo_cache.update_repo(rs, ruri, repo) - - def publish_cache_contains(self, pub): - """Returns true if the publisher's origin is cached - in the repo cache.""" - - if not self.__engine: - self.__setup() - - # we need to check that all TransportRepoURIs are present - turis = _convert_repouris([pub.repository.origins[0]]) - for turi in turis: - if turi not in self.__repo_cache: - return False - return True - - def supports_version(self, pub, op, verlist): - """Returns version-id of highest supported version. - If the version is not supported, or no data is available, - -1 is returned instead.""" - - retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT - - # Call setup if transport isn't configured, or was shutdown. - if not self.__engine: - self.__setup() - - # For backward compatibility, we pass version 0 to __gen_repo - # so that unsupported operation exception won't be raised if - # higher version is not supported, such as manifest/1. 
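supports_version, whose reformatting appears nearby, reduces to a simple lookup: ask the single origin which versions of an operation it advertises and return the highest requested version it also supports, or -1 when nothing matches or no data is available. A toy version of that check over a plain dictionary of advertised versions:

def supports_version(advertised, op, verlist):
    """Return the highest version in 'verlist' that 'advertised' lists for
    operation 'op', or -1 if unsupported or unknown.

    'advertised' maps operation names to the version ids a repository
    reported via versions/0, e.g. {"manifest": [0, 1], "add": [0]}.
    """
    offered = set(advertised.get(op, ()))
    matches = [v for v in verlist if v in offered]
    return max(matches) if matches else -1

print(supports_version({"manifest": [0, 1]}, "manifest", [1]))   # 1
print(supports_version({"manifest": [0, 1]}, "append", [0]))     # -1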
- for d, retries, v in self.__gen_repo(pub, retry_count, - origin_only=True, single_repository=True, - operation=op, versions=[0]): - return d.supports_version(op, verlist) - - def get_transfer_info(self, pub): - """Return a tuple of (compressed, hashes) where 'compressed' - indicates whether files can be transferred compressed and - 'hashes', the set of hashes of those actions that will have - their payload transferred.""" - - compressed = self.supports_version(pub, 'manifest', [1]) > -1 - return compressed, self.__hashes[pub] - - def get_transfer_size(self, pub, actions): - """Return estimated transfer size given a list of actions that - will have their payload transferred.""" - - for d, retries in self.__gen_repo(pub, 1, - origin_only=True, single_repository=True): - scheme, netloc, path, params, query, fragment = \ - urlparse(d._url, "http", allow_fragments=0) - break - - local = scheme == "file" - sendb = 0 - uploaded = 0 - support = self.supports_version(pub, "manifest", [1]) > -1 - for a in actions: - if not a.has_payload: - continue - if not support: - sendb += int(a.attrs.get("pkg.size", 0)) - continue - if a.hash not in self.__hashes[pub]: - if (local or uploaded < - self.cfg.max_transfer_checks): - # If the repository is local - # (filesystem-based) or less than - # max_transfer_checks, call - # get_compressed_attrs()... - has_file, dummy = \ - self.get_compressed_attrs( - a.hash, pub=pub, hashes=False) - if has_file: - continue - # If server doesn't have file, assume it will be - # uploaded. - sendb += int(a.attrs.get("pkg.csize", 0)) - self.__hashes[pub].add(a.hash) - uploaded += 1 - return sendb + # we need to check that all TransportRepoURIs are present + turis = _convert_repouris([pub.repository.origins[0]]) + for turi in turis: + if turi not in self.__repo_cache: + return False + return True + + def supports_version(self, pub, op, verlist): + """Returns version-id of highest supported version. + If the version is not supported, or no data is available, + -1 is returned instead.""" + + retry_count = global_settings.PKG_CLIENT_MAX_TIMEOUT + + # Call setup if transport isn't configured, or was shutdown. + if not self.__engine: + self.__setup() + + # For backward compatibility, we pass version 0 to __gen_repo + # so that unsupported operation exception won't be raised if + # higher version is not supported, such as manifest/1. 
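get_transfer_size, shown here in its old form and below in its Black-formatted form, estimates how many bytes a publication still has to upload: actions without payloads cost nothing, every payload counts at its full pkg.size when the server lacks manifest/1, and otherwise only payloads the server does not already hold are counted at their compressed pkg.csize, with remote servers queried at most max_transfer_checks times. A simplified sketch over plain dictionaries; server_has and the field names are illustrative:

def estimate_upload(actions, server_has, supports_manifest_1=True,
                    local=False, max_checks=20):
    """Estimate the bytes a publication still has to upload."""
    sendb = 0
    uploaded = 0
    seen = set()
    for a in actions:
        if not a.get("has_payload"):
            continue
        if not supports_manifest_1:
            # Old servers receive every payload again, at full size.
            sendb += a.get("size", 0)
            continue
        h = a["hash"]
        if h in seen:
            continue
        if local or uploaded < max_checks:
            # Local (file://) repositories are cheap to ask; remote ones
            # are only queried a bounded number of times.
            if server_has(h):
                continue
        sendb += a.get("csize", 0)
        seen.add(h)
        uploaded += 1
    return sendb

acts = [{"has_payload": True, "hash": "aa", "size": 10, "csize": 4},
        {"has_payload": False}]
print(estimate_upload(acts, server_has=lambda h: False))   # 4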
+ for d, retries, v in self.__gen_repo( + pub, + retry_count, + origin_only=True, + single_repository=True, + operation=op, + versions=[0], + ): + return d.supports_version(op, verlist) + + def get_transfer_info(self, pub): + """Return a tuple of (compressed, hashes) where 'compressed' + indicates whether files can be transferred compressed and + 'hashes', the set of hashes of those actions that will have + their payload transferred.""" + + compressed = self.supports_version(pub, "manifest", [1]) > -1 + return compressed, self.__hashes[pub] + + def get_transfer_size(self, pub, actions): + """Return estimated transfer size given a list of actions that + will have their payload transferred.""" + + for d, retries in self.__gen_repo( + pub, 1, origin_only=True, single_repository=True + ): + scheme, netloc, path, params, query, fragment = urlparse( + d._url, "http", allow_fragments=0 + ) + break + + local = scheme == "file" + sendb = 0 + uploaded = 0 + support = self.supports_version(pub, "manifest", [1]) > -1 + for a in actions: + if not a.has_payload: + continue + if not support: + sendb += int(a.attrs.get("pkg.size", 0)) + continue + if a.hash not in self.__hashes[pub]: + if local or uploaded < self.cfg.max_transfer_checks: + # If the repository is local + # (filesystem-based) or less than + # max_transfer_checks, call + # get_compressed_attrs()... + has_file, dummy = self.get_compressed_attrs( + a.hash, pub=pub, hashes=False + ) + if has_file: + continue + # If server doesn't have file, assume it will be + # uploaded. + sendb += int(a.attrs.get("pkg.csize", 0)) + self.__hashes[pub].add(a.hash) + uploaded += 1 + return sendb class MultiXfr(object): - """A transport object for performing multiple simultaneous - requests. This object matches publisher to list of requests, and - allows the caller to associate a piece of data with the request key.""" + """A transport object for performing multiple simultaneous + requests. This object matches publisher to list of requests, and + allows the caller to associate a piece of data with the request key.""" - def __init__(self, pub, progtrack=None, ccancel=None, alt_repo=None): - """Supply the publisher as argument 'pub'.""" + def __init__(self, pub, progtrack=None, ccancel=None, alt_repo=None): + """Supply the publisher as argument 'pub'.""" - self._publisher = pub - self._hash = {} - self._progtrack = progtrack - self._alt_repo = alt_repo - # Add the check_cancelation to the progress tracker - if progtrack and ccancel: - self._progtrack.check_cancelation = ccancel + self._publisher = pub + self._hash = {} + self._progtrack = progtrack + self._alt_repo = alt_repo + # Add the check_cancelation to the progress tracker + if progtrack and ccancel: + self._progtrack.check_cancelation = ccancel - def __contains__(self, key): - return key in self._hash + def __contains__(self, key): + return key in self._hash - def __getitem__(self, key): - return self._hash[key] + def __getitem__(self, key): + return self._hash[key] - def __iter__(self): - for k in self._hash: - yield k + def __iter__(self): + for k in self._hash: + yield k - def __len__(self): - return len(self._hash) + def __len__(self): + return len(self._hash) - # Defining "boolness" of a class, Python 2 uses the special method - # called __nonzero__() while Python 3 uses __bool__(). 
For Python - # 2 and 3 compatibility, define __bool__() only, and let - # __nonzero__ = __bool__ - def __bool__(self): - return bool(self._hash) + # Defining "boolness" of a class, Python 2 uses the special method + # called __nonzero__() while Python 3 uses __bool__(). For Python + # 2 and 3 compatibility, define __bool__() only, and let + # __nonzero__ = __bool__ + def __bool__(self): + return bool(self._hash) - __nonzero__ = __bool__ + __nonzero__ = __bool__ - def add_hash(self, hashval, item): - """Add 'item' to list of values that exist for - hash value 'hashval'.""" + def add_hash(self, hashval, item): + """Add 'item' to list of values that exist for + hash value 'hashval'.""" - self._hash[hashval] = item + self._hash[hashval] = item - def del_hash(self, hashval): - """Remove the hashval from the dictionary, if it exists.""" + def del_hash(self, hashval): + """Remove the hashval from the dictionary, if it exists.""" - self._hash.pop(hashval, None) + self._hash.pop(hashval, None) - def get_alt_repo(self): - """Return the alternate Repository object, if one has - been selected. Otherwise, return None.""" + def get_alt_repo(self): + """Return the alternate Repository object, if one has + been selected. Otherwise, return None.""" - return self._alt_repo + return self._alt_repo - def get_ccancel(self): - """If the progress tracker has an associated ccancel, - return it. Otherwise, return None.""" + def get_ccancel(self): + """If the progress tracker has an associated ccancel, + return it. Otherwise, return None.""" - return getattr(self._progtrack, "check_cancelation", None) + return getattr(self._progtrack, "check_cancelation", None) - def get_progtrack(self): - """Return the progress tracker object for this MFile, - if it has one.""" + def get_progtrack(self): + """Return the progress tracker object for this MFile, + if it has one.""" - return self._progtrack + return self._progtrack - def get_publisher(self): - """Return the publisher object that will be used - for this MultiFile request.""" + def get_publisher(self): + """Return the publisher object that will be used + for this MultiFile request.""" - return self._publisher + return self._publisher - def keys(self): - """Return a list of the keys in the hash.""" + def keys(self): + """Return a list of the keys in the hash.""" - return list(self._hash.keys()) + return list(self._hash.keys()) class MultiFile(MultiXfr): - """A transport object for performing multi-file requests - using pkg actions. This takes care of matching the publisher - with the actions, and performs the download and content - verification necessary to assure correct content installation.""" - - def __init__(self, pub, xport, progtrack, ccancel, alt_repo=None, - pfmri=None): - """Supply the destination publisher in the pub argument. - The transport object should be passed in xport.""" - - MultiXfr.__init__(self, pub, progtrack=progtrack, - ccancel=ccancel, alt_repo=alt_repo) - - self._transport = xport - self.pfmri = pfmri - - def add_action(self, action): - """The multiple file retrieval operation is asynchronous. - Add files to retrieve with this function. 
The caller - should pass the action, which causes its file to - be added to an internal retrieval list.""" - - cpath = self._transport._action_cached(action, - self.get_publisher()) - if cpath: - if self._progtrack: - filesz = int(misc.get_pkg_otw_size(action)) - file_cnt = 1 - if action.name == "signature": - filesz += \ - action.get_action_chain_csize() - file_cnt += \ - len(action.attrs.get("chain", - "").split()) - self._progtrack.download_add_progress(file_cnt, - filesz, cachehit=True) - return - - # only retrieve the least preferred hash for this action - hash_attr, hash_val, hash_func = \ - digest.get_least_preferred_hash(action) - self.add_hash(hash_val, action) + """A transport object for performing multi-file requests + using pkg actions. This takes care of matching the publisher + with the actions, and performs the download and content + verification necessary to assure correct content installation.""" + + def __init__( + self, pub, xport, progtrack, ccancel, alt_repo=None, pfmri=None + ): + """Supply the destination publisher in the pub argument. + The transport object should be passed in xport.""" + + MultiXfr.__init__( + self, pub, progtrack=progtrack, ccancel=ccancel, alt_repo=alt_repo + ) + + self._transport = xport + self.pfmri = pfmri + + def add_action(self, action): + """The multiple file retrieval operation is asynchronous. + Add files to retrieve with this function. The caller + should pass the action, which causes its file to + be added to an internal retrieval list.""" + + cpath = self._transport._action_cached(action, self.get_publisher()) + if cpath: + if self._progtrack: + filesz = int(misc.get_pkg_otw_size(action)) + file_cnt = 1 if action.name == "signature": - for c in action.get_chain_certs(least_preferred=True): - self.add_hash(c, action) - - def add_hash(self, hashval, item): - """Add 'item' to list of values that exist for - hash value 'hashval'.""" - - self._hash.setdefault(hashval, []).append(item) - - def file_done(self, hashval, current_path): - """Tell MFile that the transfer completed successfully.""" - - self._update_dlstats(hashval, current_path) - self.del_hash(hashval) - - def _update_dlstats(self, hashval, cache_path): - """Find each action associated with the hash value hashval. - Update the download statistics for this file.""" - - totalsz = 0 - nfiles = 0 - - filesz = os.stat(cache_path).st_size - for action in self._hash[hashval]: - nfiles += 1 - bn = os.path.basename(cache_path) - if action.name != "signature" or action.hash == bn: - totalsz += misc.get_pkg_otw_size(action) - else: - totalsz += action.get_chain_csize(bn) - - # The progress tracker accounts for the sizes of all actions - # even if we only have to perform one download to satisfy - # multiple actions with the same hashval. Since we know - # the size of the file we downloaded, but not necessarily - # the size of the action responsible for the download, - # generate the total size and subtract the size that was - # downloaded. The downloaded size was already accounted for in - # the engine's progress tracking. Adjust the progress tracker - # by the difference between what we have and the total we should - # have received. - nbytes = int(totalsz - filesz) - if self._progtrack: - self._progtrack.download_add_progress((nfiles - 1), - nbytes) - - def subtract_progress(self, size): - """Subtract the progress accumulated by the download of - file with hash of hashval. make_openers accounts for - hashes with multiple actions. 
If this has been invoked, - it has happened before make_openers, so it's only necessary - to adjust the progress for a single file.""" - - if not self._progtrack: - return - - self._progtrack.download_add_progress(-1, int(-size)) - - def wait_files(self): - """Wait for outstanding file retrieval operations to - complete.""" - - if self._hash: - self._transport._get_files(self) + filesz += action.get_action_chain_csize() + file_cnt += len(action.attrs.get("chain", "").split()) + self._progtrack.download_add_progress( + file_cnt, filesz, cachehit=True + ) + return + + # only retrieve the least preferred hash for this action + hash_attr, hash_val, hash_func = digest.get_least_preferred_hash(action) + self.add_hash(hash_val, action) + if action.name == "signature": + for c in action.get_chain_certs(least_preferred=True): + self.add_hash(c, action) + + def add_hash(self, hashval, item): + """Add 'item' to list of values that exist for + hash value 'hashval'.""" + + self._hash.setdefault(hashval, []).append(item) + + def file_done(self, hashval, current_path): + """Tell MFile that the transfer completed successfully.""" + + self._update_dlstats(hashval, current_path) + self.del_hash(hashval) + + def _update_dlstats(self, hashval, cache_path): + """Find each action associated with the hash value hashval. + Update the download statistics for this file.""" + + totalsz = 0 + nfiles = 0 + + filesz = os.stat(cache_path).st_size + for action in self._hash[hashval]: + nfiles += 1 + bn = os.path.basename(cache_path) + if action.name != "signature" or action.hash == bn: + totalsz += misc.get_pkg_otw_size(action) + else: + totalsz += action.get_chain_csize(bn) + + # The progress tracker accounts for the sizes of all actions + # even if we only have to perform one download to satisfy + # multiple actions with the same hashval. Since we know + # the size of the file we downloaded, but not necessarily + # the size of the action responsible for the download, + # generate the total size and subtract the size that was + # downloaded. The downloaded size was already accounted for in + # the engine's progress tracking. Adjust the progress tracker + # by the difference between what we have and the total we should + # have received. + nbytes = int(totalsz - filesz) + if self._progtrack: + self._progtrack.download_add_progress((nfiles - 1), nbytes) + + def subtract_progress(self, size): + """Subtract the progress accumulated by the download of + file with hash of hashval. make_openers accounts for + hashes with multiple actions. If this has been invoked, + it has happened before make_openers, so it's only necessary + to adjust the progress for a single file.""" + + if not self._progtrack: + return + + self._progtrack.download_add_progress(-1, int(-size)) + + def wait_files(self): + """Wait for outstanding file retrieval operations to + complete.""" + + if self._hash: + self._transport._get_files(self) -class MultiFileNI(MultiFile): - """A transport object for performing multi-file requests - using pkg actions. This takes care of matching the publisher - with the actions, and performs the download and content - verification necessary to assure correct content installation. - - This subclass is used when the actions won't be installed, but - are used to identify and verify the content. 
Additional parameters - define what happens when download finishes successfully.""" - - def __init__(self, pub, xport, final_dir=None, decompress=False, - progtrack=None, ccancel=None, alt_repo=None): - """Supply the destination publisher in the pub argument. - The transport object should be passed in xport. - - 'final_dir' indicates the directory the retrieved files should - be moved to after retrieval. If it is set to None, files will - not be moved and remain in the cache directory specified - in the 'xport' object.""" - - MultiFile.__init__(self, pub, xport, progtrack=progtrack, - ccancel=ccancel, alt_repo=alt_repo) - - self._final_dir = final_dir - self._decompress = decompress - - def add_action(self, action): - """The multiple file retrieval operation is asynchronous. - Add files to retrieve with this function. The caller - should pass the action, which causes its file to - be added to an internal retrieval list.""" - - cpath = self._transport._action_cached(action, - self.get_publisher()) - hash_attr, hash_val, hash_func = \ - digest.get_least_preferred_hash(action) +class MultiFileNI(MultiFile): + """A transport object for performing multi-file requests + using pkg actions. This takes care of matching the publisher + with the actions, and performs the download and content + verification necessary to assure correct content installation. + + This subclass is used when the actions won't be installed, but + are used to identify and verify the content. Additional parameters + define what happens when download finishes successfully.""" + + def __init__( + self, + pub, + xport, + final_dir=None, + decompress=False, + progtrack=None, + ccancel=None, + alt_repo=None, + ): + """Supply the destination publisher in the pub argument. + The transport object should be passed in xport. + + 'final_dir' indicates the directory the retrieved files should + be moved to after retrieval. If it is set to None, files will + not be moved and remain in the cache directory specified + in the 'xport' object.""" + + MultiFile.__init__( + self, + pub, + xport, + progtrack=progtrack, + ccancel=ccancel, + alt_repo=alt_repo, + ) + + self._final_dir = final_dir + self._decompress = decompress + + def add_action(self, action): + """The multiple file retrieval operation is asynchronous. + Add files to retrieve with this function. 
The caller + should pass the action, which causes its file to + be added to an internal retrieval list.""" + + cpath = self._transport._action_cached(action, self.get_publisher()) + hash_attr, hash_val, hash_func = digest.get_least_preferred_hash(action) + + if cpath and self._final_dir: + self._final_copy(hash_val, cpath) + if self._progtrack: + filesz = int(misc.get_pkg_otw_size(action)) + self._progtrack.download_add_progress(1, filesz, cachehit=True) + else: + self.add_hash(hash_val, action) + if action.name == "signature": + for c in action.get_chain_certs(least_preferred=True): + cpath = self._transport._action_cached( + action, self.get_publisher(), in_hash=c + ) if cpath and self._final_dir: - self._final_copy(hash_val, cpath) - if self._progtrack: - filesz = int(misc.get_pkg_otw_size(action)) - self._progtrack.download_add_progress(1, filesz, - cachehit=True) - else: - self.add_hash(hash_val, action) - if action.name == "signature": - for c in action.get_chain_certs(least_preferred=True): - cpath = self._transport._action_cached(action, - self.get_publisher(), in_hash=c) - if cpath and self._final_dir: - self._final_copy(c, cpath) - if self._progtrack: - self._progtrack.download_add_progress( - 1, int( - action.get_chain_csize(c))) - continue - # file_done does some magical accounting for - # files which may have been downloaded multiple - # times but this accounting breaks when the - # chain certificates are involved. For now, - # adjusting the pkg size and csize for the - # action associated with the certificates solves - # the problem by working around the special - # accounting. This fixes the problem because it - # tells file_done that no other data was - # expected for this hash of this action. - a = copy.copy(action) - # Copying the attrs separately is needed because - # otherwise the two copies of the actions share - # the dictionary. - a.attrs = copy.copy(action.attrs) - a.attrs["pkg.size"] = str( - action.get_chain_size(c)) - a.attrs["pkg.csize"] = str( - action.get_chain_csize(c)) - self.add_hash(c, a) - - def file_done(self, hashval, current_path): - """Tell MFile that the transfer completed successfully.""" - - totalsz = 0 - nactions = 0 - - filesz = os.stat(current_path).st_size - for action in self._hash[hashval]: - nactions += 1 - totalsz += misc.get_pkg_otw_size(action) - - # The progress tracker accounts for the sizes of all actions - # even if we only have to perform one download to satisfy - # multiple actions with the same hashval. Since we know - # the size of the file we downloaded, but not necessarily - # the size of the action responsible for the download, - # generate the total size and subtract the size that was - # downloaded. The downloaded size was already accounted for in - # the engine's progress tracking. Adjust the progress tracker - # by the difference between what we have and the total we should - # have received. 
- nbytes = int(totalsz - filesz) - if self._progtrack: - self._progtrack.download_add_progress((nactions - 1), - nbytes) - - if self._final_dir: - self._final_copy(hashval, current_path) - self.del_hash(hashval) - - def _final_copy(self, hashval, current_path): - """Copy the file named by hashval from current_path - to the final destination, decompressing, if necessary.""" - - dest = os.path.join(self._final_dir, hashval) - tmp_prefix = "{0}.".format(hashval) + self._final_copy(c, cpath) + if self._progtrack: + self._progtrack.download_add_progress( + 1, int(action.get_chain_csize(c)) + ) + continue + # file_done does some magical accounting for + # files which may have been downloaded multiple + # times but this accounting breaks when the + # chain certificates are involved. For now, + # adjusting the pkg size and csize for the + # action associated with the certificates solves + # the problem by working around the special + # accounting. This fixes the problem because it + # tells file_done that no other data was + # expected for this hash of this action. + a = copy.copy(action) + # Copying the attrs separately is needed because + # otherwise the two copies of the actions share + # the dictionary. + a.attrs = copy.copy(action.attrs) + a.attrs["pkg.size"] = str(action.get_chain_size(c)) + a.attrs["pkg.csize"] = str(action.get_chain_csize(c)) + self.add_hash(c, a) + + def file_done(self, hashval, current_path): + """Tell MFile that the transfer completed successfully.""" + + totalsz = 0 + nactions = 0 + + filesz = os.stat(current_path).st_size + for action in self._hash[hashval]: + nactions += 1 + totalsz += misc.get_pkg_otw_size(action) + + # The progress tracker accounts for the sizes of all actions + # even if we only have to perform one download to satisfy + # multiple actions with the same hashval. Since we know + # the size of the file we downloaded, but not necessarily + # the size of the action responsible for the download, + # generate the total size and subtract the size that was + # downloaded. The downloaded size was already accounted for in + # the engine's progress tracking. Adjust the progress tracker + # by the difference between what we have and the total we should + # have received. 
+ nbytes = int(totalsz - filesz) + if self._progtrack: + self._progtrack.download_add_progress((nactions - 1), nbytes) + + if self._final_dir: + self._final_copy(hashval, current_path) + self.del_hash(hashval) + + def _final_copy(self, hashval, current_path): + """Copy the file named by hashval from current_path + to the final destination, decompressing, if necessary.""" + + dest = os.path.join(self._final_dir, hashval) + tmp_prefix = "{0}.".format(hashval) - try: - os.makedirs(self._final_dir, mode=misc.PKG_DIR_MODE) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise apx.PermissionsException(e.filename) - if e.errno == errno.EROFS: - raise apx.ReadOnlyFileSystemException( - e.filename) - if e.errno != errno.EEXIST: - raise + try: + os.makedirs(self._final_dir, mode=misc.PKG_DIR_MODE) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise apx.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise apx.ReadOnlyFileSystemException(e.filename) + if e.errno != errno.EEXIST: + raise - try: - fd, fn = tempfile.mkstemp(dir=self._final_dir, - prefix=tmp_prefix) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise apx.PermissionsException( - e.filename) - if e.errno == errno.EROFS: - raise apx.ReadOnlyFileSystemException( - e.filename) - raise - - src = open(current_path, "rb") - outfile = os.fdopen(fd, "wb") - if self._decompress: - misc.gunzip_from_stream(src, outfile, ignore_hash=True) - else: - while True: - buf = src.read(64 * 1024) - if buf == b"": - break - outfile.write(buf) - outfile.close() - src.close() + try: + fd, fn = tempfile.mkstemp(dir=self._final_dir, prefix=tmp_prefix) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise apx.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise apx.ReadOnlyFileSystemException(e.filename) + raise + + src = open(current_path, "rb") + outfile = os.fdopen(fd, "wb") + if self._decompress: + misc.gunzip_from_stream(src, outfile, ignore_hash=True) + else: + while True: + buf = src.read(64 * 1024) + if buf == b"": + break + outfile.write(buf) + outfile.close() + src.close() + + try: + os.chmod(fn, misc.PKG_FILE_MODE) + portable.rename(fn, dest) + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise apx.PermissionsException(e.filename) + if e.errno == errno.EROFS: + raise apx.ReadOnlyFileSystemException(e.filename) + raise - try: - os.chmod(fn, misc.PKG_FILE_MODE) - portable.rename(fn, dest) - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise apx.PermissionsException(e.filename) - if e.errno == errno.EROFS: - raise apx.ReadOnlyFileSystemException( - e.filename) - raise # The following two methods are to be used by clients without an Image that # need to configure a transport and or publishers. -def setup_publisher(repo_uri, prefix, xport, xport_cfg, - remote_prefix=False, remote_publishers=False, ssl_key=None, - ssl_cert=None): - """Given transport 'xport' and publisher configuration 'xport_cfg' - take the string that identifies a repository by uri in 'repo_uri' - and create a publisher object. The caller must specify the prefix. - - If remote_prefix is True, the caller will contact the remote host - and use its publisher info to determine the publisher's actual prefix. 
- If remote_publishers is True, the caller will obtain the prefix and - repository information from the repo's publisher info.""" - - if isinstance(repo_uri, list): - repo = publisher.Repository(origins=repo_uri) - repouri_list = repo_uri +def setup_publisher( + repo_uri, + prefix, + xport, + xport_cfg, + remote_prefix=False, + remote_publishers=False, + ssl_key=None, + ssl_cert=None, +): + """Given transport 'xport' and publisher configuration 'xport_cfg' + take the string that identifies a repository by uri in 'repo_uri' + and create a publisher object. The caller must specify the prefix. + + If remote_prefix is True, the caller will contact the remote host + and use its publisher info to determine the publisher's actual prefix. + + If remote_publishers is True, the caller will obtain the prefix and + repository information from the repo's publisher info.""" + + if isinstance(repo_uri, list): + repo = publisher.Repository(origins=repo_uri) + repouri_list = repo_uri + else: + repouri_list = [publisher.RepositoryURI(repo_uri)] + repo = publisher.Repository(origins=repouri_list) + + for origin in repo.origins: + if origin.scheme == "https": + origin.ssl_key = ssl_key + origin.ssl_cert = ssl_cert + + pub = publisher.Publisher(prefix=prefix, repository=repo) + + if not remote_prefix and not remote_publishers: + xport_cfg.add_publisher(pub) + return pub + + try: + newpubs = xport.get_publisherdata(pub) + except apx.UnsupportedRepositoryOperation: + newpubs = None + + if not newpubs: + xport_cfg.add_publisher(pub) + return pub + + for p in newpubs: + psr = p.repository + + if not psr: + p.repository = repo + elif remote_publishers: + if not psr.origins: + for r in repouri_list: + psr.add_origin(r) + elif repo not in psr.origins: + for i, r in enumerate(repouri_list): + psr.origins.insert(i, r) else: - repouri_list = [publisher.RepositoryURI(repo_uri)] - repo = publisher.Repository(origins=repouri_list) - - for origin in repo.origins: - if origin.scheme == "https": - origin.ssl_key = ssl_key - origin.ssl_cert = ssl_cert + psr.origins = repouri_list - pub = publisher.Publisher(prefix=prefix, repository=repo) + if p.repository: + for origin in p.repository.origins: + if origin.scheme == pkg.client.publisher.SSL_SCHEMES: + origin.ssl_key = ssl_key + origin.ssl_cert = ssl_cert - if not remote_prefix and not remote_publishers: - xport_cfg.add_publisher(pub) - return pub + xport_cfg.add_publisher(p) - try: - newpubs = xport.get_publisherdata(pub) - except apx.UnsupportedRepositoryOperation: - newpubs = None - - if not newpubs: - xport_cfg.add_publisher(pub) - return pub - - for p in newpubs: - psr = p.repository - - if not psr: - p.repository = repo - elif remote_publishers: - if not psr.origins: - for r in repouri_list: - psr.add_origin(r) - elif repo not in psr.origins: - for i, r in enumerate(repouri_list): - psr.origins.insert(i, r) - else: - psr.origins = repouri_list + # Return first publisher in list + return newpubs[0] - if p.repository: - for origin in p.repository.origins: - if origin.scheme == \ - pkg.client.publisher.SSL_SCHEMES: - origin.ssl_key = ssl_key - origin.ssl_cert = ssl_cert - - xport_cfg.add_publisher(p) - - # Return first publisher in list - return newpubs[0] def setup_transport(): - """Initialize the transport and transport configuration. The caller - must manipulate the transport configuration and add publishers - once it receives control of the objects.""" + """Initialize the transport and transport configuration. 
The caller + must manipulate the transport configuration and add publishers + once it receives control of the objects.""" + + xport_cfg = GenericTransportCfg() + xport = Transport(xport_cfg) - xport_cfg = GenericTransportCfg() - xport = Transport(xport_cfg) + return xport, xport_cfg - return xport, xport_cfg # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/config.py b/src/modules/config.py index 336e7e139..109c20dc5 100644 --- a/src/modules/config.py +++ b/src/modules/config.py @@ -69,1862 +69,1886 @@ class ConfigError(api_errors.ApiException): - """Base exception class for property errors.""" + """Base exception class for property errors.""" class PropertyConfigError(ConfigError): - """Base exception class for property errors.""" + """Base exception class for property errors.""" - def __init__(self, section=None, prop=None): - api_errors.ApiException.__init__(self) - assert section is not None or prop is not None - self.section = section - self.prop = prop + def __init__(self, section=None, prop=None): + api_errors.ApiException.__init__(self) + assert section is not None or prop is not None + self.section = section + self.prop = prop class InvalidPropertyNameError(PropertyConfigError): - """Exception class used to indicate an invalid property name.""" + """Exception class used to indicate an invalid property name.""" - def __init__(self, prop): - assert prop is not None - PropertyConfigError.__init__(self, prop=prop) + def __init__(self, prop): + assert prop is not None + PropertyConfigError.__init__(self, prop=prop) - def __str__(self): - return _("Property name '{0}' is not valid. Section names " - "may not contain: tabs, newlines, carriage returns, " - "form feeds, vertical tabs, slashes, backslashes, or " - "non-ASCII characters.").format(self.prop) + def __str__(self): + return _( + "Property name '{0}' is not valid. Section names " + "may not contain: tabs, newlines, carriage returns, " + "form feeds, vertical tabs, slashes, backslashes, or " + "non-ASCII characters." + ).format(self.prop) class InvalidPropertyTemplateNameError(PropertyConfigError): - """Exception class used to indicate an invalid property template name. 
- """ + """Exception class used to indicate an invalid property template name.""" - def __init__(self, prop): - assert prop is not None - PropertyConfigError.__init__(self, prop=prop) + def __init__(self, prop): + assert prop is not None + PropertyConfigError.__init__(self, prop=prop) - def __str__(self): - return _("Property template name '{0}' is not valid.").format( - self.prop) + def __str__(self): + return _("Property template name '{0}' is not valid.").format(self.prop) class InvalidPropertyValueError(PropertyConfigError): - """Exception class used to indicate an invalid property value.""" - - def __init__(self, maximum=None, minimum=None, section=None, prop=None, - value=None): - PropertyConfigError.__init__(self, section=section, prop=prop) - assert not (minimum is not None and maximum is not None) - self.maximum = maximum - self.minimum = minimum - self.value = value - - def __str__(self): - if self.minimum is not None: - return _("'{value}' is less than the minimum " - "of '{minimum}' permitted for property " - "'{prop}' in section '{section}'.").format( - **self.__dict__) - if self.maximum is not None: - return _("'{value}' is greater than the maximum " - "of '{maximum}' permitted for property " - "'{prop}' in section '{section}'.").format( - **self.__dict__) - if self.section: - return _("Invalid value '{value}' for property " - "'{prop}' in section '{section}'.").format( - **self.__dict__) - return _("Invalid value '{value}' for {prop}.").format( - **self.__dict__) + """Exception class used to indicate an invalid property value.""" + + def __init__( + self, maximum=None, minimum=None, section=None, prop=None, value=None + ): + PropertyConfigError.__init__(self, section=section, prop=prop) + assert not (minimum is not None and maximum is not None) + self.maximum = maximum + self.minimum = minimum + self.value = value + + def __str__(self): + if self.minimum is not None: + return _( + "'{value}' is less than the minimum " + "of '{minimum}' permitted for property " + "'{prop}' in section '{section}'." + ).format(**self.__dict__) + if self.maximum is not None: + return _( + "'{value}' is greater than the maximum " + "of '{maximum}' permitted for property " + "'{prop}' in section '{section}'." + ).format(**self.__dict__) + if self.section: + return _( + "Invalid value '{value}' for property " + "'{prop}' in section '{section}'." + ).format(**self.__dict__) + return _("Invalid value '{value}' for {prop}.").format(**self.__dict__) class PropertyMultiValueError(InvalidPropertyValueError): - """Exception class used to indicate the property in question doesn't - allow multiple values.""" + """Exception class used to indicate the property in question doesn't + allow multiple values.""" - def __str__(self): - if self.section: - return _("Property '{prop}' in section '{section}' " - "doesn't allow multiple values.").format( - **self.__dict__) - return _("Property {0} doesn't allow multiple values.").format( - self.prop) + def __str__(self): + if self.section: + return _( + "Property '{prop}' in section '{section}' " + "doesn't allow multiple values." 
+ ).format(**self.__dict__) + return _("Property {0} doesn't allow multiple values.").format( + self.prop + ) class UnknownPropertyValueError(PropertyConfigError): - """Exception class used to indicate that the value specified - could not be found in the property's list of values.""" + """Exception class used to indicate that the value specified + could not be found in the property's list of values.""" - def __init__(self, section=None, prop=None, value=None): - PropertyConfigError.__init__(self, section=section, prop=prop) - self.value = value + def __init__(self, section=None, prop=None, value=None): + PropertyConfigError.__init__(self, section=section, prop=prop) + self.value = value - def __str__(self): - if self.section: - return _("Value '{value}' not found in the list of " - "values for property '{prop}' in section " - "'{section}'.").format(**self.__dict__) - return _("Value '{value}' not found in the list of values " - "for {prop} .").format(**self.__dict__) + def __str__(self): + if self.section: + return _( + "Value '{value}' not found in the list of " + "values for property '{prop}' in section " + "'{section}'." + ).format(**self.__dict__) + return _( + "Value '{value}' not found in the list of values " "for {prop} ." + ).format(**self.__dict__) class InvalidSectionNameError(PropertyConfigError): - """Exception class used to indicate an invalid section name.""" + """Exception class used to indicate an invalid section name.""" - def __init__(self, section): - assert section is not None - PropertyConfigError.__init__(self, section=section) + def __init__(self, section): + assert section is not None + PropertyConfigError.__init__(self, section=section) - def __str__(self): - return _("Section name '{0}' is not valid. Section names " - "may not contain: tabs, newlines, carriage returns, " - "form feeds, vertical tabs, slashes, backslashes, or " - "non-ASCII characters.").format(self.section) + def __str__(self): + return _( + "Section name '{0}' is not valid. Section names " + "may not contain: tabs, newlines, carriage returns, " + "form feeds, vertical tabs, slashes, backslashes, or " + "non-ASCII characters." + ).format(self.section) class InvalidSectionTemplateNameError(PropertyConfigError): - """Exception class used to indicate an invalid section template name.""" + """Exception class used to indicate an invalid section template name.""" - def __init__(self, section): - assert section is not None - PropertyConfigError.__init__(self, section=section) + def __init__(self, section): + assert section is not None + PropertyConfigError.__init__(self, section=section) - def __str__(self): - return _("Section template name '{0}' is not valid.").format( - self.section) + def __str__(self): + return _("Section template name '{0}' is not valid.").format( + self.section + ) class UnknownPropertyError(PropertyConfigError): - """Exception class used to indicate an invalid property.""" + """Exception class used to indicate an invalid property.""" - def __str__(self): - if self.section: - return _("Unknown property '{prop}' in section " - "'{section}'.").format(**self.__dict__) - return _("Unknown property {0}").format(self.prop) + def __str__(self): + if self.section: + return _( + "Unknown property '{prop}' in section " "'{section}'." 
+ ).format(**self.__dict__) + return _("Unknown property {0}").format(self.prop) class UnknownSectionError(PropertyConfigError): - """Exception class used to indicate an invalid section.""" + """Exception class used to indicate an invalid section.""" - def __str__(self): - return _("Unknown property section: {0}.").format( - self.section) + def __str__(self): + return _("Unknown property section: {0}.").format(self.section) @python_2_unicode_compatible class Property(object): - """Base class for properties.""" - - # Whitespace (except single space), '/', and '\' are never allowed. - __name_re = re.compile(r"\A[^\t\n\r\f\v\\/]+\Z") - - _value = None - _value_map = misc.EmptyDict - - def __init__(self, name, default="", value_map=misc.EmptyDict): - if not isinstance(name, six.string_types) or \ - not self.__name_re.match(name): - raise InvalidPropertyNameError(prop=name) + """Base class for properties.""" + + # Whitespace (except single space), '/', and '\' are never allowed. + __name_re = re.compile(r"\A[^\t\n\r\f\v\\/]+\Z") + + _value = None + _value_map = misc.EmptyDict + + def __init__(self, name, default="", value_map=misc.EmptyDict): + if not isinstance(name, six.string_types) or not self.__name_re.match( + name + ): + raise InvalidPropertyNameError(prop=name) + try: + name.encode("ascii") + except ValueError: + # Name contains non-ASCII characters. + raise InvalidPropertyNameError(prop=name) + self.__name = name + + # Last, set the property's initial value. + self.value = default + self._value_map = value_map + + def __lt__(self, other): + if not isinstance(other, Property): + return True + return self.name < other.name + + def __gt__(self, other): + if not isinstance(other, Property): + return False + return self.name > other.name + + def __le__(self, other): + return self == other or self < other + + def __ge__(self, other): + return self == other or self > other + + def __eq__(self, other): + if not isinstance(other, Property): + return False + if self.name != other.name: + return False + return self.value == other.value + + def __ne__(self, other): + if not isinstance(other, Property): + return True + if self.name != other.name: + return True + return self.value != other.value + + def __hash__(self): + return hash((self.name, self.value)) + + def __copy__(self): + return self.__class__( + self.name, default=self.value, value_map=self._value_map + ) + + def __str__(self): + # Assume that value can be represented in utf-8. + return misc.force_text(self.value) + + def _is_allowed(self, value): + """Raises an InvalidPropertyValueError if 'value' is not allowed + for this property. + """ + if not isinstance(value, six.string_types): + # Only string values are allowed. + raise InvalidPropertyValueError(prop=self.name, value=value) + + def _transform_string(self, value): + # Transform encoded UTF-8 data into unicode objects if needed. + if isinstance(value, bytes): + # Automatically transform encoded UTF-8 data into + # unicode objects if needed. + try: + value = value.encode("ascii") + except ValueError: try: - name.encode("ascii") + value = value.decode("utf-8") except ValueError: - # Name contains non-ASCII characters. - raise InvalidPropertyNameError(prop=name) - self.__name = name - - # Last, set the property's initial value. 
- self.value = default - self._value_map = value_map - - def __lt__(self, other): - if not isinstance(other, Property): - return True - return self.name < other.name - - def __gt__(self, other): - if not isinstance(other, Property): - return False - return self.name > other.name - - def __le__(self, other): - return self == other or self < other - - def __ge__(self, other): - return self == other or self > other - - def __eq__(self, other): - if not isinstance(other, Property): - return False - if self.name != other.name: - return False - return self.value == other.value - - def __ne__(self, other): - if not isinstance(other, Property): - return True - if self.name != other.name: - return True - return self.value != other.value - - def __hash__(self): - return hash((self.name, self.value)) - - def __copy__(self): - return self.__class__(self.name, default=self.value, - value_map=self._value_map) - - def __str__(self): - # Assume that value can be represented in utf-8. - return misc.force_text(self.value) - - def _is_allowed(self, value): - """Raises an InvalidPropertyValueError if 'value' is not allowed - for this property. - """ - if not isinstance(value, six.string_types): - # Only string values are allowed. - raise InvalidPropertyValueError(prop=self.name, - value=value) - - def _transform_string(self, value): - # Transform encoded UTF-8 data into unicode objects if needed. - if isinstance(value, bytes): - # Automatically transform encoded UTF-8 data into - # unicode objects if needed. - try: - value = value.encode("ascii") - except ValueError: - try: - value = value.decode("utf-8") - except ValueError: - # Assume sequence of arbitrary - # 8-bit data. - pass - return value - - @property - def name(self): - """The name of the property.""" - return self.__name - - @property - def value(self): - """The value of the property.""" - return self._value - - @value.setter - def value(self, value): - """Sets the property's value.""" - if isinstance(value, six.string_types): - value = self._value_map.get(value, value) - if value is None: - value = "" - elif isinstance(value, (bool, int)): - value = str(value) - else: - value = self._transform_string(value) - self._is_allowed(value) - self._value = value + # Assume sequence of arbitrary + # 8-bit data. + pass + return value + + @property + def name(self): + """The name of the property.""" + return self.__name + + @property + def value(self): + """The value of the property.""" + return self._value + + @value.setter + def value(self, value): + """Sets the property's value.""" + if isinstance(value, six.string_types): + value = self._value_map.get(value, value) + if value is None: + value = "" + elif isinstance(value, (bool, int)): + value = str(value) + else: + value = self._transform_string(value) + self._is_allowed(value) + self._value = value class PropertyTemplate(object): - """A class representing a template for a property. These templates are - used when loading existing configuration data or when adding new - properties to an existing configuration object if the property name - found matches the pattern name given for the template. + """A class representing a template for a property. These templates are + used when loading existing configuration data or when adding new + properties to an existing configuration object if the property name + found matches the pattern name given for the template. 
+ """ + + def __init__( + self, + name_pattern, + allowed=None, + default=None, + prop_type=Property, + value_map=None, + ): + assert prop_type + if not isinstance(name_pattern, six.string_types) or not name_pattern: + raise InvalidPropertyTemplateNameError(prop=name_pattern) + self.__name = name_pattern + try: + self.__pattern = re.compile(name_pattern) + except Exception: + # Unfortunately, python doesn't have a public exception + # class to catch re parse issues; but this only happens + # for misbehaved programs anyway. + raise InvalidPropertyTemplateNameError(prop=name_pattern) + + self.__allowed = allowed + self.__default = default + self.__prop_type = prop_type + self.__value_map = value_map + + def __copy__(self): + return self.__class__( + self.__name, + allowed=self.__allowed, + default=self.__default, + prop_type=self.__prop_type, + value_map=self.__value_map, + ) + + def create(self, name): + """Returns a new PropertySection object based on the template + using the given name. """ + assert self.match(name) + pargs = {} + if self.__allowed is not None: + pargs["allowed"] = self.__allowed + if self.__default is not None: + pargs["default"] = self.__default + if self.__value_map is not None: + pargs["value_map"] = self.__value_map + return self.__prop_type(name, **pargs) + + def match(self, name): + """Returns a boolean indicating whether the given name matches + the pattern for this template. + """ + return self.__pattern.match(name) is not None - def __init__(self, name_pattern, allowed=None, default=None, - prop_type=Property, value_map=None): - assert prop_type - if not isinstance(name_pattern, six.string_types) or not name_pattern: - raise InvalidPropertyTemplateNameError( - prop=name_pattern) - self.__name = name_pattern - try: - self.__pattern = re.compile(name_pattern) - except Exception: - # Unfortunately, python doesn't have a public exception - # class to catch re parse issues; but this only happens - # for misbehaved programs anyway. - raise InvalidPropertyTemplateNameError( - prop=name_pattern) - - self.__allowed = allowed - self.__default = default - self.__prop_type = prop_type - self.__value_map = value_map - - def __copy__(self): - return self.__class__(self.__name, allowed=self.__allowed, - default=self.__default, prop_type=self.__prop_type, - value_map=self.__value_map) - - def create(self, name): - """Returns a new PropertySection object based on the template - using the given name. - """ - assert self.match(name) - pargs = {} - if self.__allowed is not None: - pargs["allowed"] = self.__allowed - if self.__default is not None: - pargs["default"] = self.__default - if self.__value_map is not None: - pargs["value_map"] = self.__value_map - return self.__prop_type(name, **pargs) - - def match(self, name): - """Returns a boolean indicating whether the given name matches - the pattern for this template. - """ - return self.__pattern.match(name) is not None - - @property - def name(self): - """The name (pattern string) of the property template.""" - # Must return a string. - return self.__name + @property + def name(self): + """The name (pattern string) of the property template.""" + # Must return a string. 
+ return self.__name class PropBool(Property): - """Class representing properties with a boolean value.""" - - def __init__(self, name, default=False, value_map=misc.EmptyDict): - Property.__init__(self, name, default=default, - value_map=value_map) - - @Property.value.setter - def value(self, value): - if isinstance(value, six.string_types): - value = self._value_map.get(value, value) - if value is None or value == "": - self._value = False - return - elif isinstance(value, six.string_types): - if value.lower() == "true": - self._value = True - return - elif value.lower() == "false": - self._value = False - return - elif isinstance(value, bool): - self._value = value - return - raise InvalidPropertyValueError(prop=self.name, value=value) + """Class representing properties with a boolean value.""" + + def __init__(self, name, default=False, value_map=misc.EmptyDict): + Property.__init__(self, name, default=default, value_map=value_map) + + @Property.value.setter + def value(self, value): + if isinstance(value, six.string_types): + value = self._value_map.get(value, value) + if value is None or value == "": + self._value = False + return + elif isinstance(value, six.string_types): + if value.lower() == "true": + self._value = True + return + elif value.lower() == "false": + self._value = False + return + elif isinstance(value, bool): + self._value = value + return + raise InvalidPropertyValueError(prop=self.name, value=value) class PropInt(Property): - """Class representing a property with an integer value.""" - - def __init__(self, name, default=0, maximum=None, - minimum=0, value_map=misc.EmptyDict): - assert minimum is None or type(minimum) == int - assert maximum is None or type(maximum) == int - self.__maximum = maximum - self.__minimum = minimum - Property.__init__(self, name, default=default, - value_map=value_map) - - def __copy__(self): - prop = Property.__copy__(self) - prop.__maximum = self.__maximum - prop.__minimum = self.__minimum - return prop - - @property - def minimum(self): - """Minimum value permitted for this property or None.""" - return self.__minimum - - @property - def maximum(self): - """Maximum value permitted for this property or None.""" - return self.__maximum - - @Property.value.setter - def value(self, value): - if isinstance(value, six.string_types): - value = self._value_map.get(value, value) - if value is None or value == "": - value = 0 - - try: - nvalue = int(value) - except Exception: - raise InvalidPropertyValueError(prop=self.name, - value=value) - - if self.minimum is not None and nvalue < self.minimum: - raise InvalidPropertyValueError(prop=self.name, - minimum=self.minimum, value=value) - if self.maximum is not None and nvalue > self.maximum: - raise InvalidPropertyValueError(prop=self.name, - maximum=self.maximum, value=value) - self._value = nvalue + """Class representing a property with an integer value.""" + + def __init__( + self, name, default=0, maximum=None, minimum=0, value_map=misc.EmptyDict + ): + assert minimum is None or type(minimum) == int + assert maximum is None or type(maximum) == int + self.__maximum = maximum + self.__minimum = minimum + Property.__init__(self, name, default=default, value_map=value_map) + + def __copy__(self): + prop = Property.__copy__(self) + prop.__maximum = self.__maximum + prop.__minimum = self.__minimum + return prop + + @property + def minimum(self): + """Minimum value permitted for this property or None.""" + return self.__minimum + + @property + def maximum(self): + """Maximum value permitted for this 
property or None.""" + return self.__maximum + + @Property.value.setter + def value(self, value): + if isinstance(value, six.string_types): + value = self._value_map.get(value, value) + if value is None or value == "": + value = 0 + + try: + nvalue = int(value) + except Exception: + raise InvalidPropertyValueError(prop=self.name, value=value) + + if self.minimum is not None and nvalue < self.minimum: + raise InvalidPropertyValueError( + prop=self.name, minimum=self.minimum, value=value + ) + if self.maximum is not None and nvalue > self.maximum: + raise InvalidPropertyValueError( + prop=self.name, maximum=self.maximum, value=value + ) + self._value = nvalue class PropPublisher(Property): - """Class representing properties with a publisher prefix/alias value.""" + """Class representing properties with a publisher prefix/alias value.""" - @Property.value.setter - def value(self, value): - if isinstance(value, six.string_types): - value = self._value_map.get(value, value) - if value is None or value == "": - self._value = "" - return + @Property.value.setter + def value(self, value): + if isinstance(value, six.string_types): + value = self._value_map.get(value, value) + if value is None or value == "": + self._value = "" + return - if not isinstance(value, six.string_types) or \ - not misc.valid_pub_prefix(value): - # Only string values are allowed. - raise InvalidPropertyValueError(prop=self.name, - value=value) - self._value = value + if not isinstance(value, six.string_types) or not misc.valid_pub_prefix( + value + ): + # Only string values are allowed. + raise InvalidPropertyValueError(prop=self.name, value=value) + self._value = value class PropDefined(Property): - """Class representing properties with that can only have one of a set - of pre-defined values.""" - - def __init__(self, name, allowed=misc.EmptyI, default="", - value_map=misc.EmptyDict): - self.__allowed = allowed - Property.__init__(self, name, default=default, - value_map=value_map) - - def __copy__(self): - prop = Property.__copy__(self) - prop.__allowed = copy.copy(self.__allowed) - return prop - - def _is_allowed(self, value): - """Raises an InvalidPropertyValueError if 'value' is not allowed - for this property. - """ - - # Enforce base class rules. - Property._is_allowed(self, value) - - if len(self.__allowed) == 0: - return - - for a in self.__allowed: - if value == a: - break - if a == "" and \ - value.startswith("exec:") and \ - len(value) > 5: - # Don't try to determine if path is valid; - # just that the value starts with 'exec:'. - break - if a == "" and value.startswith("svc:") and \ - len(value) > 4: - # Don't try to determine if FMRI is valid; - # just that the value starts with 'svc:'. - break - if a == "" and os.path.isabs(value): - break - if a == "" and len(value) > 1: - # Don't try to determine if path is valid; - # just that the length is greater than 1. - break - else: - raise InvalidPropertyValueError(prop=self.name, - value=value) + """Class representing properties with that can only have one of a set + of pre-defined values.""" + + def __init__( + self, name, allowed=misc.EmptyI, default="", value_map=misc.EmptyDict + ): + self.__allowed = allowed + Property.__init__(self, name, default=default, value_map=value_map) + + def __copy__(self): + prop = Property.__copy__(self) + prop.__allowed = copy.copy(self.__allowed) + return prop + + def _is_allowed(self, value): + """Raises an InvalidPropertyValueError if 'value' is not allowed + for this property. + """ + + # Enforce base class rules. 
+ Property._is_allowed(self, value) + + if len(self.__allowed) == 0: + return + + for a in self.__allowed: + if value == a: + break + if ( + a == "" + and value.startswith("exec:") + and len(value) > 5 + ): + # Don't try to determine if path is valid; + # just that the value starts with 'exec:'. + break + if a == "" and value.startswith("svc:") and len(value) > 4: + # Don't try to determine if FMRI is valid; + # just that the value starts with 'svc:'. + break + if a == "" and os.path.isabs(value): + break + if a == "" and len(value) > 1: + # Don't try to determine if path is valid; + # just that the length is greater than 1. + break + else: + raise InvalidPropertyValueError(prop=self.name, value=value) + + @property + def allowed(self): + """A list of allowed values for this property.""" + return self.__allowed - @property - def allowed(self): - """A list of allowed values for this property.""" - return self.__allowed class PropList(PropDefined): - """Class representing properties with a list of string values that may - contain arbitrary character data. - """ + """Class representing properties with a list of string values that may + contain arbitrary character data. + """ + + def _parse_str(self, value): + """Parse the provided python string literal and return the + resulting data structure.""" + try: + value = ast.literal_eval(value) + except (SyntaxError, ValueError): + # ast raises ValueError if input isn't safe or + # valid. + raise InvalidPropertyValueError(prop=self.name, value=value) + return value + + @PropDefined.value.setter + def value(self, value): + # the value can be arbitrary 8-bit data, so we allow bytes here + if isinstance(value, (six.string_types, bytes)): + value = self._value_map.get(value, value) + if value is None or value == "": + value = [] + elif isinstance(value, (six.string_types, bytes)): + value = self._parse_str(value) + if not isinstance(value, list): + # Only accept lists for literal string form. + raise InvalidPropertyValueError(prop=self.name, value=value) + else: + try: + iter(value) + except TypeError: + raise InvalidPropertyValueError(prop=self.name, value=value) - def _parse_str(self, value): - """Parse the provided python string literal and return the - resulting data structure.""" - try: - value = ast.literal_eval(value) - except (SyntaxError, ValueError): - # ast raises ValueError if input isn't safe or - # valid. - raise InvalidPropertyValueError(prop=self.name, - value=value) - return value - - @PropDefined.value.setter - def value(self, value): - # the value can be arbitrary 8-bit data, so we allow bytes here - if isinstance(value, (six.string_types, bytes)): - value = self._value_map.get(value, value) - if value is None or value == "": - value = [] - elif isinstance(value, (six.string_types, bytes)): - value = self._parse_str(value) - if not isinstance(value, list): - # Only accept lists for literal string form. - raise InvalidPropertyValueError(prop=self.name, - value=value) - else: - try: - iter(value) - except TypeError: - raise InvalidPropertyValueError(prop=self.name, - value=value) - - nvalue = [] - for v in value: - if v is None: - v = "" - elif isinstance(v, (bool, int)): - v = str(v) - elif not isinstance(v, six.string_types): - # Only string values are allowed. 
- raise InvalidPropertyValueError(prop=self.name, - value=value) - self._is_allowed(v) - nvalue.append(v) + nvalue = [] + for v in value: + if v is None: + v = "" + elif isinstance(v, (bool, int)): + v = str(v) + elif not isinstance(v, six.string_types): + # Only string values are allowed. + raise InvalidPropertyValueError(prop=self.name, value=value) + self._is_allowed(v) + nvalue.append(v) - if self.allowed and "" not in self.allowed and not len(nvalue): - raise InvalidPropertyValueError(prop=self.name, - value=nvalue) + if self.allowed and "" not in self.allowed and not len(nvalue): + raise InvalidPropertyValueError(prop=self.name, value=nvalue) - self._value = nvalue + self._value = nvalue class PropDictionaryList(PropList): - """Class representing properties with a value specified as a list of - dictionaries. Each dictionary must contain string key/value pairs, or - a string key, with None as a value. - """ + """Class representing properties with a value specified as a list of + dictionaries. Each dictionary must contain string key/value pairs, or + a string key, with None as a value. + """ + + @PropDefined.value.setter + def value(self, value): + if isinstance(value, six.string_types): + value = self._value_map.get(value, value) + if value is None or value == "": + value = [] + elif isinstance(value, six.string_types): + value = self._parse_str(value) + if not isinstance(value, list): + # Only accept lists for literal string form. + raise InvalidPropertyValueError(prop=self.name, value=value) + else: + try: + iter(value) + except TypeError: + raise InvalidPropertyValueError(prop=self.name, value=value) - @PropDefined.value.setter - def value(self, value): - if isinstance(value, six.string_types): - value = self._value_map.get(value, value) - if value is None or value == "": - value = [] - elif isinstance(value, six.string_types): - value = self._parse_str(value) - if not isinstance(value, list): - # Only accept lists for literal string form. - raise InvalidPropertyValueError(prop=self.name, - value=value) - else: - try: - iter(value) - except TypeError: - raise InvalidPropertyValueError(prop=self.name, - value=value) - - self._is_allowed(value) - nvalue = [] - for v in value: - if v is None: - v = {} - elif not isinstance(v, dict): - # Only dict values are allowed. - raise InvalidPropertyValueError(prop=self.name, - value=value) - for item in v: - # we allow None values, but always store them - # as an empty string to prevent them getting - # serialised as "None" - if not v[item]: - v[item] = "" - nvalue.append(v) + self._is_allowed(value) + nvalue = [] + for v in value: + if v is None: + v = {} + elif not isinstance(v, dict): + # Only dict values are allowed. 
+ raise InvalidPropertyValueError(prop=self.name, value=value) + for item in v: + # we allow None values, but always store them + # as an empty string to prevent them getting + # serialised as "None" + if not v[item]: + v[item] = "" + nvalue.append(v) + + # if we don't allow an empty list, raise an error + if self.allowed and "" not in self.allowed and not len(nvalue): + raise InvalidPropertyValueError(prop=self.name, value=nvalue) + self._value = nvalue + + def _is_allowed(self, value): + if not isinstance(value, list): + raise InvalidPropertyValueError(prop=self.name, value=value) + + # ensure that we only have dictionary values + for dic in value: + if not isinstance(dic, dict): + raise InvalidPropertyValueError(prop=self.name, value=value) + + if not self.allowed: + return + + # ensure that each dictionary in the value is allowed + for dic in value: + if not isinstance(dic, dict): + raise InvalidPropertyValueError(prop=self.name, value=value) + if dic not in self.allowed: + raise InvalidPropertyValueError(prop=self.name, value=value) + for key, val in dic.items(): + Property._is_allowed(self, key) + if not val: + continue + Property._is_allowed(self, val) - # if we don't allow an empty list, raise an error - if self.allowed and "" not in self.allowed and not len(nvalue): - raise InvalidPropertyValueError(prop=self.name, - value=nvalue) - self._value = nvalue - - def _is_allowed(self, value): - if not isinstance(value, list): - raise InvalidPropertyValueError(prop=self.name, - value=value) - - # ensure that we only have dictionary values - for dic in value: - if not isinstance(dic, dict): - raise InvalidPropertyValueError(prop=self.name, - value=value) - - if not self.allowed: - return - - # ensure that each dictionary in the value is allowed - for dic in value: - if not isinstance(dic, dict): - raise InvalidPropertyValueError(prop=self.name, - value=value) - if dic not in self.allowed: - raise InvalidPropertyValueError(prop=self.name, - value=value) - for key, val in dic.items(): - Property._is_allowed(self, key) - if not val: - continue - Property._is_allowed(self, val) @python_2_unicode_compatible class PropSimpleList(PropList): - """Class representing a property with a list of string values that are - simple in nature. Output is in a comma-separated format that may not - be suitable for some datasets such as those containing arbitrary data, - newlines, commas or that may contain zero-length strings. This class - exists for compatibility with older configuration files that stored - lists of data in this format and should not be used for new consumers. + """Class representing a property with a list of string values that are + simple in nature. Output is in a comma-separated format that may not + be suitable for some datasets such as those containing arbitrary data, + newlines, commas or that may contain zero-length strings. This class + exists for compatibility with older configuration files that stored + lists of data in this format and should not be used for new consumers. + """ + + def _is_allowed(self, value): + """Raises an InvalidPropertyValueError if 'value' is not allowed + for this property. """ - def _is_allowed(self, value): - """Raises an InvalidPropertyValueError if 'value' is not allowed - for this property. - """ + # Enforce base class rules. + PropList._is_allowed(self, value) - # Enforce base class rules. 
- PropList._is_allowed(self, value) + if isinstance(value, bytes): + try: + value.decode("utf-8") + except ValueError: + # Arbitrary 8-bit data not supported for simple + # lists. + raise InvalidPropertyValueError(prop=self.name, value=value) - if isinstance(value, bytes): - try: - value.decode("utf-8") - except ValueError: - # Arbitrary 8-bit data not supported for simple - # lists. - raise InvalidPropertyValueError(prop=self.name, - value=value) - - def _parse_str(self, value): - """Parse the provided list string and return it as a list.""" - # Automatically transform encoded UTF-8 data into Unicode - # objects if needed. This results in ASCII data being - # stored using str() objects, and UTF-8 data using - # unicode() objects. In Python 3, we just want UTF-8 data - # using str(unicode) objects. - result = [] - if isinstance(value, bytes): - value = value.split(b",") + def _parse_str(self, value): + """Parse the provided list string and return it as a list.""" + # Automatically transform encoded UTF-8 data into Unicode + # objects if needed. This results in ASCII data being + # stored using str() objects, and UTF-8 data using + # unicode() objects. In Python 3, we just want UTF-8 data + # using str(unicode) objects. + result = [] + if isinstance(value, bytes): + value = value.split(b",") + else: + value = value.split(",") + for v in value: + try: + if six.PY2: + v = v.encode("ascii") else: - value = value.split(",") - for v in value: - try: - if six.PY2: - v = v.encode("ascii") - else: - v= misc.force_str(v) - except ValueError: - if not isinstance(v, six.text_type): - try: - v = v.decode("utf-8") - except ValueError: - # Arbitrary 8-bit data not - # supported for simple lists. - raise InvalidPropertyValueError( - prop=self.name, - value=value) - result.append(v) - return result - - def __str__(self): - if self.value and len(self.value): - # Performing the join using a unicode string results in - # a single unicode string object. - return u",".join(self.value) - return u"" + v = misc.force_str(v) + except ValueError: + if not isinstance(v, six.text_type): + try: + v = v.decode("utf-8") + except ValueError: + # Arbitrary 8-bit data not + # supported for simple lists. + raise InvalidPropertyValueError( + prop=self.name, value=value + ) + result.append(v) + return result + + def __str__(self): + if self.value and len(self.value): + # Performing the join using a unicode string results in + # a single unicode string object. + return ",".join(self.value) + return "" class PropPubURI(Property): - """Class representing publisher URI properties.""" + """Class representing publisher URI properties.""" - def _is_allowed(self, value): - """Raises an InvalidPropertyValueError if 'value' is not allowed - for this property. - """ + def _is_allowed(self, value): + """Raises an InvalidPropertyValueError if 'value' is not allowed + for this property. + """ - # Enforce base class rules. - Property._is_allowed(self, value) + # Enforce base class rules. + Property._is_allowed(self, value) - if value == "": - return + if value == "": + return - valid = misc.valid_pub_url(value) - if not valid: - raise InvalidPropertyValueError(prop=self.name, - value=value) + valid = misc.valid_pub_url(value) + if not valid: + raise InvalidPropertyValueError(prop=self.name, value=value) class PropSimplePubURIList(PropSimpleList): - """Class representing a property for a list of publisher URIs. Output - is in a basic comma-separated format that may not be suitable for some - datasets. 
This class exists for compatibility with older configuration - files that stored lists of data in this format and should not be used - for new consumers. + """Class representing a property for a list of publisher URIs. Output + is in a basic comma-separated format that may not be suitable for some + datasets. This class exists for compatibility with older configuration + files that stored lists of data in this format and should not be used + for new consumers. + """ + + def _is_allowed(self, value): + """Raises an InvalidPropertyValueError if 'value' is not allowed + for this property. """ - def _is_allowed(self, value): - """Raises an InvalidPropertyValueError if 'value' is not allowed - for this property. - """ - - # Enforce base class rules. - PropSimpleList._is_allowed(self, value) + # Enforce base class rules. + PropSimpleList._is_allowed(self, value) - valid = misc.valid_pub_url(value) - if not valid: - raise InvalidPropertyValueError(prop=self.name, - value=value) + valid = misc.valid_pub_url(value) + if not valid: + raise InvalidPropertyValueError(prop=self.name, value=value) class PropPubURIList(PropList): - """Class representing a property for a list of publisher URIs.""" + """Class representing a property for a list of publisher URIs.""" - def _is_allowed(self, value): - """Raises an InvalidPropertyValueError if 'value' is not allowed - for this property. - """ + def _is_allowed(self, value): + """Raises an InvalidPropertyValueError if 'value' is not allowed + for this property. + """ - # Enforce base class rules. - PropList._is_allowed(self, value) + # Enforce base class rules. + PropList._is_allowed(self, value) - valid = misc.valid_pub_url(value) - if not valid: - raise InvalidPropertyValueError(prop=self.name, - value=value) + valid = misc.valid_pub_url(value) + if not valid: + raise InvalidPropertyValueError(prop=self.name, value=value) class PropPubURIDictionaryList(PropDictionaryList): - """Class representing a list of values associated with a given publisher - URI. + """Class representing a list of values associated with a given publisher + URI. - A PropPubURIDictionaryList contains a series of dictionaries, where - each dictionary must have a "uri" key with a valid URI as a value. + A PropPubURIDictionaryList contains a series of dictionaries, where + each dictionary must have a "uri" key with a valid URI as a value. - eg. + eg. - [ {'uri':'http://foo', - 'proxy': 'http://foo-proxy'}, - {'uri': 'http://bar', - 'proxy': http://bar-proxy'} - ... ] - """ + [ {'uri':'http://foo', + 'proxy': 'http://foo-proxy'}, + {'uri': 'http://bar', + 'proxy': http://bar-proxy'} + ... ] + """ - def _is_allowed(self, value): - """Raises an InvalidPropertyValueError if 'value' is not allowed - for this property. - """ + def _is_allowed(self, value): + """Raises an InvalidPropertyValueError if 'value' is not allowed + for this property. + """ - # Enforce base class rules. - PropDictionaryList._is_allowed(self, value) + # Enforce base class rules. 
+ PropDictionaryList._is_allowed(self, value) - for dic in value: - if 'uri' not in dic: - raise InvalidPropertyValueError(prop=self.name, - value=value) - if not misc.valid_pub_url(dic["uri"]): - raise InvalidPropertyValueError(prop=self.name, - value=value) + for dic in value: + if "uri" not in dic: + raise InvalidPropertyValueError(prop=self.name, value=value) + if not misc.valid_pub_url(dic["uri"]): + raise InvalidPropertyValueError(prop=self.name, value=value) class PropUUID(Property): - """Class representing a Universally Unique Identifier property.""" + """Class representing a Universally Unique Identifier property.""" - def _is_allowed(self, value): - if value == "": - return + def _is_allowed(self, value): + if value == "": + return - try: - uuid.UUID(hex=str(value)) - except Exception: - # Not a valid UUID. - raise InvalidPropertyValueError(prop=self.name, - value=value) + try: + uuid.UUID(hex=str(value)) + except Exception: + # Not a valid UUID. + raise InvalidPropertyValueError(prop=self.name, value=value) class PropVersion(Property): - """Class representing a property with a non-negative integer dotsequence - value.""" + """Class representing a property with a non-negative integer dotsequence + value.""" + + def __init__(self, name, default="0", value_map=misc.EmptyDict): + Property.__init__(self, name, default=default, value_map=value_map) + + def __str__(self): + return self.value.get_short_version() + + @Property.value.setter + def value(self, value): + if isinstance(value, six.string_types): + value = self._value_map.get(value, value) + if value is None or value == "": + value = "0" + + if isinstance(value, pkg.version.Version): + nvalue = value + else: + try: + nvalue = pkg.version.Version(value) + except Exception: + raise InvalidPropertyValueError(prop=self.name, value=value) - def __init__(self, name, default="0", value_map=misc.EmptyDict): - Property.__init__(self, name, default=default, - value_map=value_map) + self._value = nvalue - def __str__(self): - return self.value.get_short_version() - @Property.value.setter - def value(self, value): - if isinstance(value, six.string_types): - value = self._value_map.get(value, value) - if value is None or value == "": - value = "0" +@python_2_unicode_compatible +class PropertySection(object): + """A class representing a section of the configuration that also + provides an interface for adding and managing properties and sections + for the section.""" + + # Whitespace (except single space), '/', and '\' are never allowed + # although consumers can place additional restrictions by providing + # a name re. In addition, the name "CONFIGURATION" is reserved for + # use by the configuration serialization classes. + __name_re = re.compile(r"\A[^\t\n\r\f\v\\/]+\Z") + + def __init__(self, name, properties=misc.EmptyI): + if ( + not isinstance(name, six.string_types) + or not self.__name_re.match(name) + or name == "CONFIGURATION" + ): + raise InvalidSectionNameError(name) + try: + name.encode("ascii") + except ValueError: + # Name contains non-ASCII characters. + raise InvalidSectionNameError(name) + self.__name = name + + # Should be set last. + # Dict is in arbitrary order, sort it first to ensure the + # order is same in Python 2 and 3. 
+ self.__properties = OrderedDict((p.name, p) for p in properties) + + def __lt__(self, other): + if not isinstance(other, PropertySection): + return True + return self.name < other.name + + def __gt__(self, other): + if not isinstance(other, PropertySection): + return False + return self.name > other.name + + def __eq__(self, other): + if not isinstance(other, PropertySection): + return False + return self.name == other.name + + def __hash__(self): + return hash(self.name) + + def __copy__(self): + propsec = self.__class__(self.__name) + for p in self.get_properties(): + propsec.add_property(copy.copy(p)) + return propsec + + def __str__(self): + return six.text_type(self.name) + + def add_property(self, prop): + """Adds the specified property object to the section. The + property must not already exist.""" + assert prop.name not in self.__properties + self.__properties[prop.name] = prop + return prop + + def get_index(self): + """Returns a dictionary of property values indexed by property + name.""" + return dict( + (pname, p.value) + for pname, p in six.iteritems(self.__properties) + if hasattr(p, "value") + ) + + def get_property(self, name): + """Returns the property object with the specified name. If + not found, an UnknownPropertyError will be raised.""" + try: + return self.__properties[name] + except KeyError: + raise UnknownPropertyError(section=self.__name, prop=name) + + def get_properties(self): + """Returns a generator that yields the list of property objects.""" + return six.itervalues(self.__properties) + + def remove_property(self, name): + """Removes any matching property object from the section.""" + try: + del self.__properties[name] + except KeyError: + raise UnknownPropertyError(section=self.__name, prop=name) + + @property + def name(self): + """The name of the section.""" + return self.__name - if isinstance(value, pkg.version.Version): - nvalue = value - else: - try: - nvalue = pkg.version.Version(value) - except Exception: - raise InvalidPropertyValueError(prop=self.name, - value=value) - self._value = nvalue +class PropertySectionTemplate(object): + """A class representing a template for a section of the configuration. + These templates are used when loading existing configuration data + or when adding new sections to an existing configuration object if + the section name found matches the pattern name given for the template. + """ + + def __init__(self, name_pattern, properties=misc.EmptyI): + if not isinstance(name_pattern, six.string_types) or not name_pattern: + raise InvalidSectionTemplateNameError(section=name_pattern) + self.__name = name_pattern + try: + self.__pattern = re.compile(name_pattern) + except Exception: + # Unfortunately, python doesn't have a public exception + # class to catch re parse issues; but this only happens + # for misbehaved programs anyway. + raise InvalidSectionTemplateNameError(section=name_pattern) + self.__properties = properties + + def __copy__(self): + return self.__class__( + self.__name, properties=copy.copy(self.__properties) + ) + + def create(self, name): + """Returns a new PropertySection object based on the template + using the given name. + """ + assert self.match(name) + # A *copy* of the properties must be used to construct the new + # section; otherwise all sections created by this template will + # share the same property *objects* (which is bad). 
+ return PropertySection( + name, properties=[copy.copy(p) for p in self.__properties] + ) + + def match(self, name): + """Returns a boolean indicating whether the given name matches + the pattern for this template. + """ + return self.__pattern.match(name) is not None + + @property + def name(self): + """The name (pattern text) of the property section template.""" + # Must return a string. + return self.__name @python_2_unicode_compatible -class PropertySection(object): - """A class representing a section of the configuration that also - provides an interface for adding and managing properties and sections - for the section.""" - - # Whitespace (except single space), '/', and '\' are never allowed - # although consumers can place additional restrictions by providing - # a name re. In addition, the name "CONFIGURATION" is reserved for - # use by the configuration serialization classes. - __name_re = re.compile(r"\A[^\t\n\r\f\v\\/]+\Z") - - def __init__(self, name, properties=misc.EmptyI): - if not isinstance(name, six.string_types) or \ - not self.__name_re.match(name) or \ - name == "CONFIGURATION": - raise InvalidSectionNameError(name) - try: - name.encode("ascii") - except ValueError: - # Name contains non-ASCII characters. - raise InvalidSectionNameError(name) - self.__name = name - - # Should be set last. - # Dict is in arbitrary order, sort it first to ensure the - # order is same in Python 2 and 3. - self.__properties = OrderedDict((p.name, p) for p in properties) - - def __lt__(self, other): - if not isinstance(other, PropertySection): - return True - return self.name < other.name - - def __gt__(self, other): - if not isinstance(other, PropertySection): - return False - return self.name > other.name - - def __eq__(self, other): - if not isinstance(other, PropertySection): - return False - return self.name == other.name - - def __hash__(self): - return hash(self.name) - - def __copy__(self): - propsec = self.__class__(self.__name) - for p in self.get_properties(): - propsec.add_property(copy.copy(p)) - return propsec - - def __str__(self): - return six.text_type(self.name) - - def add_property(self, prop): - """Adds the specified property object to the section. The - property must not already exist.""" - assert prop.name not in self.__properties - self.__properties[prop.name] = prop - return prop - - def get_index(self): - """Returns a dictionary of property values indexed by property - name.""" - return dict( - (pname, p.value) - for pname, p in six.iteritems(self.__properties) - if hasattr(p, "value") - ) - - def get_property(self, name): - """Returns the property object with the specified name. If - not found, an UnknownPropertyError will be raised.""" - try: - return self.__properties[name] - except KeyError: - raise UnknownPropertyError(section=self.__name, - prop=name) - - def get_properties(self): - """Returns a generator that yields the list of property objects. - """ - return six.itervalues(self.__properties) - - def remove_property(self, name): - """Removes any matching property object from the section.""" - try: - del self.__properties[name] - except KeyError: - raise UnknownPropertyError(section=self.__name, - prop=name) +class Config(object): + """The Config class provides basic in-memory management of configuration + data.""" - @property - def name(self): - """The name of the section.""" - return self.__name + _dirty = False + _target = None + def __init__( + self, definitions=misc.EmptyDict, overrides=misc.EmptyDict, version=None + ): + """Initializes a Config object. 
-class PropertySectionTemplate(object): - """A class representing a template for a section of the configuration. - These templates are used when loading existing configuration data - or when adding new sections to an existing configuration object if - the section name found matches the pattern name given for the template. + 'definitions' is a dictionary of PropertySection objects indexed + by configuration version defining the initial set of property + sections, properties, and values for a Config object. + + 'overrides' is an optional dictionary of property values indexed + by section name and property name. If provided, it will be used + to override any default values initially assigned during + initialization. + + 'version' is an integer value that will be used to determine + which configuration definition to use. If not provided, the + newest version found in 'definitions' will be used. """ - def __init__(self, name_pattern, properties=misc.EmptyI): - if not isinstance(name_pattern, six.string_types) or not name_pattern: - raise InvalidSectionTemplateNameError( - section=name_pattern) - self.__name = name_pattern - try: - self.__pattern = re.compile(name_pattern) - except Exception: - # Unfortunately, python doesn't have a public exception - # class to catch re parse issues; but this only happens - # for misbehaved programs anyway. - raise InvalidSectionTemplateNameError( - section=name_pattern) - self.__properties = properties - - def __copy__(self): - return self.__class__(self.__name, - properties=copy.copy(self.__properties)) - - def create(self, name): - """Returns a new PropertySection object based on the template - using the given name. - """ - assert self.match(name) - # A *copy* of the properties must be used to construct the new - # section; otherwise all sections created by this template will - # share the same property *objects* (which is bad). - return PropertySection(name, properties=[ - copy.copy(p) for p in self.__properties - ]) - - def match(self, name): - """Returns a boolean indicating whether the given name matches - the pattern for this template. - """ - return self.__pattern.match(name) is not None - - @property - def name(self): - """The name (pattern text) of the property section template.""" - # Must return a string. - return self.__name + assert version is None or isinstance(version, int) + + self.__sections = OrderedDict() + self._defs = definitions + if version is None: + if definitions: + version = max(definitions.keys()) + else: + version = 0 + self._version = version + self.reset(overrides=overrides) + + def __str__(self): + """Returns a unicode object representation of the configuration + object. + """ + out = "" + for sec, props in self.get_properties(): + out += "[{0}]\n".format(sec.name) + for p in props: + out += "{0} = {1}\n".format(p.name, six.text_type(p)) + out += "\n" + return out + + def _get_matching_property(self, section, name, default_type=Property): + """Returns the Property object matching the given name for + the given PropertySection object, or adds a new one (if it + does not already exist) based on class definitions. + + 'default_type' is an optional parameter specifying the type of + property to create if a class definition does not exist for the + given property. + """ + self._validate_section_name(section) + self._validate_property_name(name) + + try: + secobj = self.get_section(section) + except UnknownSectionError: + # Get a copy of the definition for this section. 
+ secobj = self.__get_section_def(section) + + # Elide property templates. + elide = [ + p.name + for p in secobj.get_properties() + if not isinstance(p, Property) + ] + # force map() to process elements + list(map(secobj.remove_property, elide)) + self.add_section(secobj) + + try: + return secobj.get_property(name) + except UnknownPropertyError: + # See if there is an existing definition for this + # property; if there is, duplicate it, and add it + # to the section. + secdef = self.__get_section_def(secobj.name) + propobj = self.__get_property_def( + secdef, name, default_type=default_type + ) + secobj.add_property(propobj) + return propobj + + # Subclasses can redefine these to impose additional restrictions on + # section and property names. These methods should return if the name + # is valid, or raise an exception if it is not. These methods are only + # used during __init__, add_section, reset, set_property, and write. + def _validate_property_name(self, name): + """Raises an exception if property name is not valid for this + class. + """ + pass -@python_2_unicode_compatible -class Config(object): - """The Config class provides basic in-memory management of configuration - data.""" - - _dirty = False - _target = None - - def __init__(self, definitions=misc.EmptyDict, overrides=misc.EmptyDict, - version=None): - """Initializes a Config object. - - 'definitions' is a dictionary of PropertySection objects indexed - by configuration version defining the initial set of property - sections, properties, and values for a Config object. - - 'overrides' is an optional dictionary of property values indexed - by section name and property name. If provided, it will be used - to override any default values initially assigned during - initialization. - - 'version' is an integer value that will be used to determine - which configuration definition to use. If not provided, the - newest version found in 'definitions' will be used. - """ - - assert version is None or isinstance(version, int) - - self.__sections = OrderedDict() - self._defs = definitions - if version is None: - if definitions: - version = max(definitions.keys()) - else: - version = 0 - self._version = version - self.reset(overrides=overrides) - - def __str__(self): - """Returns a unicode object representation of the configuration - object. - """ - out = u"" - for sec, props in self.get_properties(): - out += u"[{0}]\n".format(sec.name) - for p in props: - out += u"{0} = {1}\n".format(p.name, six.text_type(p)) - out += u"\n" - return out - - def _get_matching_property(self, section, name, default_type=Property): - """Returns the Property object matching the given name for - the given PropertySection object, or adds a new one (if it - does not already exist) based on class definitions. - - 'default_type' is an optional parameter specifying the type of - property to create if a class definition does not exist for the - given property. - """ - - self._validate_section_name(section) - self._validate_property_name(name) + def _validate_section_name(self, name): + """Raises an exception if section name is not valid for this + class. + """ + pass - try: - secobj = self.get_section(section) - except UnknownSectionError: - # Get a copy of the definition for this section. - secobj = self.__get_section_def(section) - - # Elide property templates. 
- elide = [ - p.name for p in secobj.get_properties() - if not isinstance(p, Property) - ] - # force map() to process elements - list(map(secobj.remove_property, elide)) - self.add_section(secobj) + def __get_property_def(self, secdef, name, default_type=Property): + """Returns a new Property object for the given name based on + class definitions (if available). + """ - try: - return secobj.get_property(name) - except UnknownPropertyError: - # See if there is an existing definition for this - # property; if there is, duplicate it, and add it - # to the section. - secdef = self.__get_section_def(secobj.name) - propobj = self.__get_property_def(secdef, name, - default_type=default_type) - secobj.add_property(propobj) - return propobj - - # Subclasses can redefine these to impose additional restrictions on - # section and property names. These methods should return if the name - # is valid, or raise an exception if it is not. These methods are only - # used during __init__, add_section, reset, set_property, and write. - def _validate_property_name(self, name): - """Raises an exception if property name is not valid for this - class. - """ - pass + try: + propobj = secdef.get_property(name) + return copy.copy(propobj) + except UnknownPropertyError: + # No specific definition found for this section, + # see if there is a suitable template for creating + # one. + for p in secdef.get_properties(): + if not isinstance(p, PropertyTemplate): + continue + if p.match(name): + return p.create(name) + + # Not a known property; create a new one using + # the default type. + return default_type(name) + + def __get_section_def(self, name): + """Returns a new PropertySection object for the given name based + on class definitions (if available). + """ - def _validate_section_name(self, name): - """Raises an exception if section name is not valid for this - class. - """ - pass + # See if there is an existing definition for this + # section; if there is, return a copy. + for s in self._defs.get(self._version, misc.EmptyDict): + if not isinstance(s, PropertySection): + # Ignore section templates. + continue + if s.name == name: + return copy.copy(s) + else: + # No specific definition found for this section, + # see if there is a suitable template for creating + # one. + for s in self._defs.get(self._version, misc.EmptyDict): + if not isinstance(s, PropertySectionTemplate): + continue + if s.match(name): + return s.create(name) + return PropertySection(name) + + def __reset(self, overrides=misc.EmptyDict): + """Returns the configuration object to its default state.""" + self.__sections = OrderedDict() + for s in self._defs.get(self._version, misc.EmptyDict): + if not isinstance(s, PropertySection): + # Templates should be skipped during reset. + continue + self._validate_section_name(s.name) + + # Elide property templates. + secobj = copy.copy(s) + elide = [ + p.name + for p in secobj.get_properties() + if not isinstance(p, Property) + ] + list(map(secobj.remove_property, elide)) + self.add_section(secobj) + + for sname, props in six.iteritems(overrides): + for pname, val in six.iteritems(props): + self.set_property(sname, pname, val) + + def add_property_value(self, section, name, value): + """Adds the value to the property object matching the given + section and name. If the section or property does not already + exist, it will be added. 
Raises InvalidPropertyValueError if + the value is not valid for the given property or if the target + property isn't a list.""" + + propobj = self._get_matching_property( + section, name, default_type=PropList + ) + if not isinstance(propobj.value, list): + raise PropertyMultiValueError( + section=section, prop=name, value=value + ) + + # If a value was just appended directly, the property class + # set method wouldn't be executed and the value added wouldn't + # get verified, so append to a copy of the property's value and + # then set the property to the new value. This allows the new + # value to be verified and/or rejected without affecting the + # property. + pval = copy.copy(propobj.value) + pval.append(value) + try: + propobj.value = pval + except PropertyConfigError as e: + if hasattr(e, "section") and not e.section: + e.section = section + raise + self._dirty = True + + def add_section(self, section): + """Adds the specified property section object. The section must + not already exist. + """ + assert isinstance(section, PropertySection) + assert section.name not in self.__sections + self._validate_section_name(section.name) + self.__sections[section.name] = section + + def get_index(self): + """Returns a dictionary of dictionaries indexed by section name + and then property name for all properties.""" + return dict((s.name, s.get_index()) for s in self.get_sections()) + + def get_property(self, section, name): + """Returns the value of the property object matching the given + section and name. Raises UnknownPropertyError if it does not + exist. + + Be aware that references to the original value are returned; + if the return value is not an immutable object (such as a list), + changes to the object will affect the property. If the return + value needs to be modified, consumers are advised to create a + copy first, and then call set_property() to update the value. + Calling set_property() with the updated value is the only way + to ensure that changes to a property's value are persistent. + """ + try: + sec = self.get_section(section) + except UnknownSectionError: + # To aid in debugging, re-raise as a property error + # so that both the unknown section and property are + # in the error message. + raise UnknownPropertyError(section=section, prop=name) + return sec.get_property(name).value + + def get_properties(self): + """Returns a generator that yields a list of tuples of the form + (section object, property generator). The property generator + yields the list of property objects for the section. + """ + return ((s, s.get_properties()) for s in self.get_sections()) - def __get_property_def(self, secdef, name, default_type=Property): - """Returns a new Property object for the given name based on - class definitions (if available). - """ + def get_section(self, name): + """Returns the PropertySection object with the given name. + Raises UnknownSectionError if it does not exist. + """ + try: + return self.__sections[name] + except KeyError: + raise UnknownSectionError(section=name) + + def get_sections(self): + """Returns a generator that yields the list of property section + objects.""" + return six.itervalues(self.__sections) + + def remove_property(self, section, name): + """Remove the property object matching the given section and + name. Raises UnknownPropertyError if it does not exist. 
+ """ + try: + sec = self.get_section(section) + except UnknownSectionError: + # To aid in debugging, re-raise as a property error + # so that both the unknown section and property are + # in the error message. + raise UnknownPropertyError(section=section, prop=name) + sec.remove_property(name) + self._dirty = True + + def remove_property_value(self, section, name, value): + """Removes the value from the list of values for the property + object matching the given section and name. Raises + UnknownPropertyError if the property or section does not + exist. Raises InvalidPropertyValueError if the value is not + valid for the given property or if the target property isn't a + list.""" + + self._validate_section_name(section) + self._validate_property_name(name) + + try: + secobj = self.get_section(section) + except UnknownSectionError: + # To aid in debugging, re-raise as a property error + # so that both the unknown section and property are + # in the error message. + raise UnknownPropertyError(section=section, prop=name) + + propobj = secobj.get_property(name) + if not isinstance(propobj.value, list): + raise PropertyMultiValueError( + section=section, prop=name, value=value + ) + + # Remove the value from a copy of the actual property object + # value so that the property's set verification can happen. + pval = copy.copy(propobj.value) + try: + pval.remove(value) + except ValueError: + raise UnknownPropertyValueError( + section=section, prop=name, value=value + ) + else: + try: + propobj.value = pval + except PropertyConfigError as e: + if hasattr(e, "section") and not e.section: + e.section = section + raise + self._dirty = True + + def remove_section(self, name): + """Remove the object matching the given section name. Raises + UnknownSectionError if it does not exist. + """ + try: + del self.__sections[name] + except KeyError: + raise UnknownSectionError(section=name) + self._dirty = True + + def reset(self, overrides=misc.EmptyDict): + """Discards current configuration data and returns the + configuration object to its initial state. + + 'overrides' is an optional dictionary of property values + indexed by section name and property name. If provided, + it will be used to override any default values initially + assigned during reset. + """ - try: - propobj = secdef.get_property(name) - return copy.copy(propobj) - except UnknownPropertyError: - # No specific definition found for this section, - # see if there is a suitable template for creating - # one. - for p in secdef.get_properties(): - if not isinstance(p, PropertyTemplate): - continue - if p.match(name): - return p.create(name) - - # Not a known property; create a new one using - # the default type. - return default_type(name) - - def __get_section_def(self, name): - """Returns a new PropertySection object for the given name based - on class definitions (if available). - """ - - # See if there is an existing definition for this - # section; if there is, return a copy. - for s in self._defs.get(self._version, misc.EmptyDict): - if not isinstance(s, PropertySection): - # Ignore section templates. - continue - if s.name == name: - return copy.copy(s) - else: - # No specific definition found for this section, - # see if there is a suitable template for creating - # one. 
- for s in self._defs.get(self._version, - misc.EmptyDict): - if not isinstance(s, - PropertySectionTemplate): - continue - if s.match(name): - return s.create(name) - return PropertySection(name) - - def __reset(self, overrides=misc.EmptyDict): - """Returns the configuration object to its default state.""" - self.__sections = OrderedDict() - for s in self._defs.get(self._version, misc.EmptyDict): - if not isinstance(s, PropertySection): - # Templates should be skipped during reset. - continue - self._validate_section_name(s.name) - - # Elide property templates. - secobj = copy.copy(s) - elide = [ - p.name for p in secobj.get_properties() - if not isinstance(p, Property) - ] - list(map(secobj.remove_property, elide)) - self.add_section(secobj) - - for sname, props in six.iteritems(overrides): - for pname, val in six.iteritems(props): - self.set_property(sname, pname, val) - - def add_property_value(self, section, name, value): - """Adds the value to the property object matching the given - section and name. If the section or property does not already - exist, it will be added. Raises InvalidPropertyValueError if - the value is not valid for the given property or if the target - property isn't a list.""" - - propobj = self._get_matching_property(section, name, - default_type=PropList) - if not isinstance(propobj.value, list): - raise PropertyMultiValueError(section=section, - prop=name, value=value) - - # If a value was just appended directly, the property class - # set method wouldn't be executed and the value added wouldn't - # get verified, so append to a copy of the property's value and - # then set the property to the new value. This allows the new - # value to be verified and/or rejected without affecting the - # property. - pval = copy.copy(propobj.value) - pval.append(value) - try: - propobj.value = pval - except PropertyConfigError as e: - if hasattr(e, "section") and not e.section: - e.section = section - raise - self._dirty = True - - def add_section(self, section): - """Adds the specified property section object. The section must - not already exist. - """ - assert isinstance(section, PropertySection) - assert section.name not in self.__sections - self._validate_section_name(section.name) - self.__sections[section.name] = section - - def get_index(self): - """Returns a dictionary of dictionaries indexed by section name - and then property name for all properties.""" - return dict( - (s.name, s.get_index()) - for s in self.get_sections() - ) - - def get_property(self, section, name): - """Returns the value of the property object matching the given - section and name. Raises UnknownPropertyError if it does not - exist. - - Be aware that references to the original value are returned; - if the return value is not an immutable object (such as a list), - changes to the object will affect the property. If the return - value needs to be modified, consumers are advised to create a - copy first, and then call set_property() to update the value. - Calling set_property() with the updated value is the only way - to ensure that changes to a property's value are persistent. - """ - try: - sec = self.get_section(section) - except UnknownSectionError: - # To aid in debugging, re-raise as a property error - # so that both the unknown section and property are - # in the error message. 
- raise UnknownPropertyError(section=section, prop=name) - return sec.get_property(name).value - - def get_properties(self): - """Returns a generator that yields a list of tuples of the form - (section object, property generator). The property generator - yields the list of property objects for the section. - """ - return ( - (s, s.get_properties()) - for s in self.get_sections() - ) - - def get_section(self, name): - """Returns the PropertySection object with the given name. - Raises UnknownSectionError if it does not exist. - """ - try: - return self.__sections[name] - except KeyError: - raise UnknownSectionError(section=name) - - def get_sections(self): - """Returns a generator that yields the list of property section - objects.""" - return six.itervalues(self.__sections) - - def remove_property(self, section, name): - """Remove the property object matching the given section and - name. Raises UnknownPropertyError if it does not exist. - """ - try: - sec = self.get_section(section) - except UnknownSectionError: - # To aid in debugging, re-raise as a property error - # so that both the unknown section and property are - # in the error message. - raise UnknownPropertyError(section=section, prop=name) - sec.remove_property(name) - self._dirty = True - - def remove_property_value(self, section, name, value): - """Removes the value from the list of values for the property - object matching the given section and name. Raises - UnknownPropertyError if the property or section does not - exist. Raises InvalidPropertyValueError if the value is not - valid for the given property or if the target property isn't a - list.""" - - self._validate_section_name(section) - self._validate_property_name(name) + # Initialize to default state. + self._dirty = True + self.__reset(overrides=overrides) + + def set_property(self, section, name, value): + """Sets the value of the property object matching the given + section and name. If the section or property does not already + exist, it will be added. Raises InvalidPropertyValueError if + the value is not valid for the given property.""" + + self._validate_section_name(section) + self._validate_property_name(name) + + propobj = self._get_matching_property(section, name) + try: + propobj.value = value + except PropertyConfigError as e: + if hasattr(e, "section") and not e.section: + e.section = section + raise + self._dirty = True + + def set_properties(self, properties): + """Sets the values of the property objects matching those found + in the provided dictionary. If any section or property does not + already exist, it will be added. An InvalidPropertyValueError + will be raised if the value is not valid for the given + properties. + + 'properties' should be a dictionary of dictionaries indexed by + section and then by property name. As an example: + + { + 'section': { + 'property': value + } + } + """ - try: - secobj = self.get_section(section) - except UnknownSectionError: - # To aid in debugging, re-raise as a property error - # so that both the unknown section and property are - # in the error message. - raise UnknownPropertyError(section=section, prop=name) - - propobj = secobj.get_property(name) - if not isinstance(propobj.value, list): - raise PropertyMultiValueError(section=section, - prop=name, value=value) - - # Remove the value from a copy of the actual property object - # value so that the property's set verification can happen. 
- pval = copy.copy(propobj.value) - try: - pval.remove(value) - except ValueError: - raise UnknownPropertyValueError(section=section, - prop=name, value=value) - else: - try: - propobj.value = pval - except PropertyConfigError as e: - if hasattr(e, "section") and not e.section: - e.section = section - raise - self._dirty = True - - def remove_section(self, name): - """Remove the object matching the given section name. Raises - UnknownSectionError if it does not exist. - """ - try: - del self.__sections[name] - except KeyError: - raise UnknownSectionError(section=name) - self._dirty = True - - def reset(self, overrides=misc.EmptyDict): - """Discards current configuration data and returns the - configuration object to its initial state. - - 'overrides' is an optional dictionary of property values - indexed by section name and property name. If provided, - it will be used to override any default values initially - assigned during reset. - """ - - # Initialize to default state. - self._dirty = True - self.__reset(overrides=overrides) - - def set_property(self, section, name, value): - """Sets the value of the property object matching the given - section and name. If the section or property does not already - exist, it will be added. Raises InvalidPropertyValueError if - the value is not valid for the given property.""" - - self._validate_section_name(section) - self._validate_property_name(name) - - propobj = self._get_matching_property(section, name) - try: - propobj.value = value - except PropertyConfigError as e: - if hasattr(e, "section") and not e.section: - e.section = section - raise - self._dirty = True - - def set_properties(self, properties): - """Sets the values of the property objects matching those found - in the provided dictionary. If any section or property does not - already exist, it will be added. An InvalidPropertyValueError - will be raised if the value is not valid for the given - properties. - - 'properties' should be a dictionary of dictionaries indexed by - section and then by property name. As an example: - - { - 'section': { - 'property': value - } - } - """ - - # Dict is in arbitrary order, sort it first to ensure the - # order is same in Python 2 and 3. - properties = OrderedDict(sorted(properties.items())) - for section, props in six.iteritems(properties): - props = OrderedDict(sorted(props.items())) - for pname, pval in six.iteritems(props): - self.set_property(section, pname, pval) - - @property - def target(self): - """Returns the target used for storage and retrieval of - configuration data. This can be None, a pathname, or - an SMF FMRI. - """ - return self._target - - @property - def version(self): - """Returns an integer value used to indicate what set of - configuration data is in use.""" - - return self._version - - def write(self): - """Saves the current configuration object to the target - provided at initialization. - """ - pass + # Dict is in arbitrary order, sort it first to ensure the + # order is same in Python 2 and 3. + properties = OrderedDict(sorted(properties.items())) + for section, props in six.iteritems(properties): + props = OrderedDict(sorted(props.items())) + for pname, pval in six.iteritems(props): + self.set_property(section, pname, pval) + + @property + def target(self): + """Returns the target used for storage and retrieval of + configuration data. This can be None, a pathname, or + an SMF FMRI. 
+ """ + return self._target + + @property + def version(self): + """Returns an integer value used to indicate what set of + configuration data is in use.""" + + return self._version + + def write(self): + """Saves the current configuration object to the target + provided at initialization. + """ + pass class FileConfig(Config): - """The FileConfig class provides file-based retrieval and storage of - non-structured (one-level deep) configuration data. This particular - class uses Python's ConfigParser module for configuration storage and - management. - - ConfigParser uses a simple text format that consists of sections, lead - by a "[section]" header, and followed by "name = value" entries, with - continuations, etc. in the style of RFC 822. Values can be split over - multiple lines by beginning continuation lines with whitespace. A - sample configuration file might look like this: - - [pkg] - port = 80 - inst_root = /export/repo - - [pub_example_com] - feed_description = example.com's software - update log + """The FileConfig class provides file-based retrieval and storage of + non-structured (one-level deep) configuration data. This particular + class uses Python's ConfigParser module for configuration storage and + management. + + ConfigParser uses a simple text format that consists of sections, lead + by a "[section]" header, and followed by "name = value" entries, with + continuations, etc. in the style of RFC 822. Values can be split over + multiple lines by beginning continuation lines with whitespace. A + sample configuration file might look like this: + + [pkg] + port = 80 + inst_root = /export/repo + + [pub_example_com] + feed_description = example.com's software + update log + """ + + def __init__( + self, + pathname, + definitions=misc.EmptyDict, + overrides=misc.EmptyDict, + version=None, + ): + """Initializes the object. + + 'pathname' is the name of the file to read existing + configuration data from or to write new configuration + data to. If the file does not already exist, defaults + are set based on the version provided and the file will + be created when the configuration is written. + + 'definitions' is a dictionary of PropertySection objects indexed + by configuration version defining the initial set of property + sections, properties, and values for a Config object. + + 'overrides' is an optional dictionary of property values indexed + by section name and property name. If provided, it will be used + to override any default values initially assigned during + initialization. + + 'version' is an integer value that will be used to determine + which configuration definition to use. If not provided, the + version will be based on the contents of the configuration + file or the newest version found in 'definitions'. """ + # Must be set first. + self._target = pathname - def __init__(self, pathname, definitions=misc.EmptyDict, - overrides=misc.EmptyDict, version=None): - """Initializes the object. - - 'pathname' is the name of the file to read existing - configuration data from or to write new configuration - data to. If the file does not already exist, defaults - are set based on the version provided and the file will - be created when the configuration is written. - - 'definitions' is a dictionary of PropertySection objects indexed - by configuration version defining the initial set of property - sections, properties, and values for a Config object. - - 'overrides' is an optional dictionary of property values indexed - by section name and property name. 
If provided, it will be used - to override any default values initially assigned during - initialization. - - 'version' is an integer value that will be used to determine - which configuration definition to use. If not provided, the - version will be based on the contents of the configuration - file or the newest version found in 'definitions'. - """ - # Must be set first. - self._target = pathname - - Config.__init__(self, definitions=definitions, - overrides=overrides, version=version) - - def __read(self, overrides=misc.EmptyDict): - """Reads the specified pathname and populates the configuration - object based on the data contained within. The file is - expected to be in a ConfigParser-compatible format. - """ - - # First, attempt to read the target. - cp = configparser.RawConfigParser() - # Disabled ConfigParser's inane option transformation to ensure - # option case is preserved. - cp.optionxform = lambda x: x + Config.__init__( + self, definitions=definitions, overrides=overrides, version=version + ) - try: - efile = codecs.open(self._target, mode="rb", - encoding="utf-8") - except EnvironmentError as e: - if e.errno == errno.ENOENT: - # Assume default configuration. - pass - elif e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - else: - raise + def __read(self, overrides=misc.EmptyDict): + """Reads the specified pathname and populates the configuration + object based on the data contained within. The file is + expected to be in a ConfigParser-compatible format. + """ + + # First, attempt to read the target. + cp = configparser.RawConfigParser() + # Disabled ConfigParser's inane option transformation to ensure + # option case is preserved. + cp.optionxform = lambda x: x + + try: + efile = codecs.open(self._target, mode="rb", encoding="utf-8") + except EnvironmentError as e: + if e.errno == errno.ENOENT: + # Assume default configuration. + pass + elif e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + else: + raise + else: + try: + # readfp() will be removed in futher Python + # versions, use read_file() instead. + if six.PY2: + cp.readfp(efile) else: - try: - # readfp() will be removed in futher Python - # versions, use read_file() instead. - if six.PY2: - cp.readfp(efile) - else: - cp.read_file(efile) - except (configparser.ParsingError, - configparser.MissingSectionHeaderError) as e: - raise api_errors.InvalidConfigFile( - self._target) - # Attempt to determine version from contents. - try: - version = cp.getint("CONFIGURATION", "version") - self._version = version - except (configparser.NoSectionError, - configparser.NoOptionError, ValueError): - # Assume current version. - pass - efile.close() - - # Reset to initial state to ensure the default set of properties - # and values exists so that any values not specified by the - # saved configuration or overrides will be correct. This must - # be done after the version is determined above so that the - # saved configuration data can be merged with the correct - # configuration definition. - Config.reset(self, overrides=overrides) - - for section in cp.sections(): - if section == "CONFIGURATION": - # Reserved for configuration file management. - continue - for prop, value in cp.items(section): - if section in overrides and \ - prop in overrides[section]: - continue - - propobj = self._get_matching_property(section, - prop) - - # Try to convert unicode object to str object - # to ensure comparisons works as expected for - # consumers. 
- try: - value = str(value) - except UnicodeEncodeError: - # Value contains unicode. - pass - try: - propobj.value = value - except PropertyConfigError as e: - if hasattr(e, "section") and \ - not e.section: - e.section = section - raise - - def reset(self, overrides=misc.EmptyDict): - """Discards current configuration state and returns the - configuration object to its initial state. - - 'overrides' is an optional dictionary of property values - indexed by section name and property name. If provided, - it will be used to override any default values initially - assigned during reset. - """ - - # Reload the configuration. - self.__read(overrides=overrides) - - if not overrides: - # Unless there were overrides, ignore any initial - # values for the purpose of determining whether a - # write should occur. This isn't strictly correct, - # but is the desired behaviour in most cases. This - # also matches the historical behaviour of the - # configuration classes used in pkg(7). - self._dirty = False - - def write(self): - """Saves the configuration data using the pathname provided at - initialization. - """ - - if os.path.exists(self._target) and not self._dirty: - return - - cp = configparser.RawConfigParser() - # Disabled ConfigParser's inane option transformation to ensure - # option case is preserved. - cp.optionxform = lambda x: x - - for section, props in self.get_properties(): - assert isinstance(section, PropertySection) - cp.add_section(section.name) - for p in props: - assert isinstance(p, Property) - cp.set(section.name, p.name, misc.force_str(p)) - - # Used to track configuration management information. - cp.add_section("CONFIGURATION") - cp.set("CONFIGURATION", "version", str(self._version)) - - fn = None + cp.read_file(efile) + except ( + configparser.ParsingError, + configparser.MissingSectionHeaderError, + ) as e: + raise api_errors.InvalidConfigFile(self._target) + # Attempt to determine version from contents. + try: + version = cp.getint("CONFIGURATION", "version") + self._version = version + except ( + configparser.NoSectionError, + configparser.NoOptionError, + ValueError, + ): + # Assume current version. + pass + efile.close() + + # Reset to initial state to ensure the default set of properties + # and values exists so that any values not specified by the + # saved configuration or overrides will be correct. This must + # be done after the version is determined above so that the + # saved configuration data can be merged with the correct + # configuration definition. + Config.reset(self, overrides=overrides) + + for section in cp.sections(): + if section == "CONFIGURATION": + # Reserved for configuration file management. + continue + for prop, value in cp.items(section): + if section in overrides and prop in overrides[section]: + continue + + propobj = self._get_matching_property(section, prop) + + # Try to convert unicode object to str object + # to ensure comparisons works as expected for + # consumers. try: - dirname = os.path.dirname(self._target) + value = str(value) + except UnicodeEncodeError: + # Value contains unicode. + pass + try: + propobj.value = value + except PropertyConfigError as e: + if hasattr(e, "section") and not e.section: + e.section = section + raise + + def reset(self, overrides=misc.EmptyDict): + """Discards current configuration state and returns the + configuration object to its initial state. + + 'overrides' is an optional dictionary of property values + indexed by section name and property name. 
If provided, + it will be used to override any default values initially + assigned during reset. + """ - fd = None - st = None - try: - fd, fn = tempfile.mkstemp(dir=dirname) - st = os.stat(self._target) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - if st: - os.fchmod(fd, stat.S_IMODE(st.st_mode)) - try: - portable.chown(fn, st.st_uid, st.st_gid) - except OSError as e: - if e.errno != errno.EPERM: - raise - else: - os.fchmod(fd, misc.PKG_FILE_MODE) - - if six.PY2: - with os.fdopen(fd, "wb") as f: - with codecs.EncodedFile(f, "utf-8") as ef: - cp.write(ef) - else: - # it becomes easier to open the file - with open(fd, "w", encoding="utf-8") as f: - cp.write(f) - portable.rename(fn, self._target) - self._dirty = False - except EnvironmentError as e: - if e.errno == errno.EACCES: - raise api_errors.PermissionsException( - e.filename) - elif e.errno == errno.EROFS: - raise api_errors.ReadOnlyFileSystemException( - e.filename) + # Reload the configuration. + self.__read(overrides=overrides) + + if not overrides: + # Unless there were overrides, ignore any initial + # values for the purpose of determining whether a + # write should occur. This isn't strictly correct, + # but is the desired behaviour in most cases. This + # also matches the historical behaviour of the + # configuration classes used in pkg(7). + self._dirty = False + + def write(self): + """Saves the configuration data using the pathname provided at + initialization. + """ + + if os.path.exists(self._target) and not self._dirty: + return + + cp = configparser.RawConfigParser() + # Disabled ConfigParser's inane option transformation to ensure + # option case is preserved. + cp.optionxform = lambda x: x + + for section, props in self.get_properties(): + assert isinstance(section, PropertySection) + cp.add_section(section.name) + for p in props: + assert isinstance(p, Property) + cp.set(section.name, p.name, misc.force_str(p)) + + # Used to track configuration management information. + cp.add_section("CONFIGURATION") + cp.set("CONFIGURATION", "version", str(self._version)) + + fn = None + try: + dirname = os.path.dirname(self._target) + + fd = None + st = None + try: + fd, fn = tempfile.mkstemp(dir=dirname) + st = os.stat(self._target) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + if st: + os.fchmod(fd, stat.S_IMODE(st.st_mode)) + try: + portable.chown(fn, st.st_uid, st.st_gid) + except OSError as e: + if e.errno != errno.EPERM: raise - finally: - if fn and os.path.exists(fn): - os.unlink(fn) + else: + os.fchmod(fd, misc.PKG_FILE_MODE) + + if six.PY2: + with os.fdopen(fd, "wb") as f: + with codecs.EncodedFile(f, "utf-8") as ef: + cp.write(ef) + else: + # it becomes easier to open the file + with open(fd, "w", encoding="utf-8") as f: + cp.write(f) + portable.rename(fn, self._target) + self._dirty = False + except EnvironmentError as e: + if e.errno == errno.EACCES: + raise api_errors.PermissionsException(e.filename) + elif e.errno == errno.EROFS: + raise api_errors.ReadOnlyFileSystemException(e.filename) + raise + finally: + if fn and os.path.exists(fn): + os.unlink(fn) # For SMF properties and property groups, this defines the naming restrictions. # Although, additional restrictions may be imposed by the property and section # classes in this module. 
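As a minimal, self-contained sketch of the naming restriction described above (the pattern string is copied from the _SMF_name_re definition that follows; the example names are hypothetical):

    import re

    # Same pattern as _SMF_name_re: an optional "prefix," group followed
    # by a name that must start with a letter.
    smf_name_re = re.compile(
        "^([A-Za-z][ A-Za-z0-9.-]*,)?[A-Za-z][ A-Za-z0-9-_]*$"
    )

    # Plain alphanumeric names, such as those used for pkg(7)
    # configuration sections and properties, are accepted; a name
    # containing a slash is not.
    assert smf_name_re.match("pub_example_com") is not None
    assert smf_name_re.match("feed_description") is not None
    assert smf_name_re.match("bad/name") is None

The same pattern is applied by SMFConfig below through its _validate_property_name() and _validate_section_name() helpers.
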
-_SMF_name_re = '^([A-Za-z][ A-Za-z0-9.-]*,)?[A-Za-z][ A-Za-z0-9-_]*$' +_SMF_name_re = "^([A-Za-z][ A-Za-z0-9.-]*,)?[A-Za-z][ A-Za-z0-9-_]*$" + class SMFInvalidPropertyNameError(PropertyConfigError): - """Exception class used to indicate an invalid SMF property name.""" + """Exception class used to indicate an invalid SMF property name.""" - def __init__(self, prop): - assert prop is not None - PropertyConfigError.__init__(self, prop=prop) + def __init__(self, prop): + assert prop is not None + PropertyConfigError.__init__(self, prop=prop) - def __str__(self): - return _("Property name '{name}' is not valid. Property " - "names may not contain: tabs, newlines, carriage returns, " - "form feeds, vertical tabs, slashes, or backslashes and " - "must also match the regular expression: {exp}").format( - name=self.prop, exp=_SMF_name_re) + def __str__(self): + return _( + "Property name '{name}' is not valid. Property " + "names may not contain: tabs, newlines, carriage returns, " + "form feeds, vertical tabs, slashes, or backslashes and " + "must also match the regular expression: {exp}" + ).format(name=self.prop, exp=_SMF_name_re) class SMFInvalidSectionNameError(PropertyConfigError): - """Exception class used to indicate an invalid SMF section name.""" + """Exception class used to indicate an invalid SMF section name.""" - def __init__(self, section): - assert section is not None - PropertyConfigError.__init__(self, section=section) + def __init__(self, section): + assert section is not None + PropertyConfigError.__init__(self, section=section) - def __str__(self): - return _("Section name '{name}' is not valid. Section names " - "may not contain: tabs, newlines, carriage returns, form " - "feeds, vertical tabs, slashes, or backslashes and must " - "also match the regular expression: {exp}").format( - name=self.prop, exp=_SMF_name_re) + def __str__(self): + return _( + "Section name '{name}' is not valid. 
Section names " + "may not contain: tabs, newlines, carriage returns, form " + "feeds, vertical tabs, slashes, or backslashes and must " + "also match the regular expression: {exp}" + ).format(name=self.prop, exp=_SMF_name_re) class SMFReadError(ConfigError): - """Exception classes used to indicate that an error was encountered - while attempting to read configuration data from SMF.""" + """Exception classes used to indicate that an error was encountered + while attempting to read configuration data from SMF.""" - def __init__(self, svc_fmri, errmsg): - ConfigError.__init__(self) - assert svc_fmri and errmsg - self.fmri = svc_fmri - self.errmsg = errmsg + def __init__(self, svc_fmri, errmsg): + ConfigError.__init__(self) + assert svc_fmri and errmsg + self.fmri = svc_fmri + self.errmsg = errmsg - def __str__(self): - return _("Unable to read configuration data for SMF FMRI " - "'{fmri}':\n{errmsg}").format(**self.__dict__) + def __str__(self): + return _( + "Unable to read configuration data for SMF FMRI " + "'{fmri}':\n{errmsg}" + ).format(**self.__dict__) class SMFWriteError(ConfigError): - """Exception classes used to indicate that an error was encountered - while attempting to write configuration data to SMF.""" + """Exception classes used to indicate that an error was encountered + while attempting to write configuration data to SMF.""" - def __init__(self, svc_fmri, errmsg): - ConfigError.__init__(self) - assert svc_fmri and errmsg - self.fmri = svc_fmri - self.errmsg = errmsg + def __init__(self, svc_fmri, errmsg): + ConfigError.__init__(self) + assert svc_fmri and errmsg + self.fmri = svc_fmri + self.errmsg = errmsg - def __str__(self): - return _("Unable to write configuration data for SMF FMRI " - "'{fmri}':\n{errmsg}").format(**self.__dict__) + def __str__(self): + return _( + "Unable to write configuration data for SMF FMRI " + "'{fmri}':\n{errmsg}" + ).format(**self.__dict__) class SMFConfig(Config): - """The SMFConfig class provides SMF-based retrieval of non-structured - (one-level deep) configuration data. Property groups should be named - after property sections. Properties with list-based values should be - stored using SMF list properties.""" - - __name_re = re.compile(_SMF_name_re) - __reserved_sections = ("general", "restarter", "fs", "autofs", "ntp", - "network", "startd", "manifestfiles", "start", "stop", - "tm_common_name") - - def __init__(self, svc_fmri, definitions=misc.EmptyDict, - doorpath=None, overrides=misc.EmptyDict, version=0): - """Initializes the object. - - 'svc_fmri' is the FMRI of the SMF service to use for property - data storage and retrieval. - - 'definitions' is a dictionary of PropertySection objects indexed - by configuration version defining the initial set of property - sections, properties, and values for a Config object. - - 'doorpath' is an optional pathname indicating the location of - a door file to be used to communicate with SMF. This is - intended for use with an alternative svc.configd daemon. - - 'overrides' is an optional dictionary of property values indexed - by section name and property name. If provided, it will be used - to override any default values initially assigned during - initialization. - - 'version' is an integer value that will be used to determine - which configuration definition to use. If not provided, the - version will be based on the newest version found in - 'definitions'. - """ - # Must be set first. 
- self.__doorpath = doorpath - self._target = svc_fmri - - Config.__init__(self, definitions=definitions, - overrides=overrides, version=version) - - def _validate_property_name(self, name): - """Raises an exception if property name is not valid for this - class. - """ - if not self.__name_re.match(name): - raise SMFInvalidPropertyNameError(name) - - def _validate_section_name(self, name): - """Raises an exception if section name is not valid for this - class. - """ - if not self.__name_re.match(name) or \ - name in self.__reserved_sections: - raise SMFInvalidSectionNameError(name) - - def __read(self, overrides=misc.EmptyDict): - """Reads the configuration from the SMF FMRI specified at init - time. - """ - - doorpath = "" - if self.__doorpath: - doorpath = "LIBSCF_DOORPATH={0} ".format( - self.__doorpath) - - cmd = "{0}/usr/bin/svcprop -c -t {1}".format(doorpath, - self._target) - p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, err = p.communicate() - status, result = p.returncode, misc.force_str(out) - if status: - raise SMFReadError(self._target, - "{cmd}: {result}".format(**locals())) - - cfgdata = {} + """The SMFConfig class provides SMF-based retrieval of non-structured + (one-level deep) configuration data. Property groups should be named + after property sections. Properties with list-based values should be + stored using SMF list properties.""" + + __name_re = re.compile(_SMF_name_re) + __reserved_sections = ( + "general", + "restarter", + "fs", + "autofs", + "ntp", + "network", + "startd", + "manifestfiles", + "start", + "stop", + "tm_common_name", + ) + + def __init__( + self, + svc_fmri, + definitions=misc.EmptyDict, + doorpath=None, + overrides=misc.EmptyDict, + version=0, + ): + """Initializes the object. + + 'svc_fmri' is the FMRI of the SMF service to use for property + data storage and retrieval. + + 'definitions' is a dictionary of PropertySection objects indexed + by configuration version defining the initial set of property + sections, properties, and values for a Config object. + + 'doorpath' is an optional pathname indicating the location of + a door file to be used to communicate with SMF. This is + intended for use with an alternative svc.configd daemon. + + 'overrides' is an optional dictionary of property values indexed + by section name and property name. If provided, it will be used + to override any default values initially assigned during + initialization. + + 'version' is an integer value that will be used to determine + which configuration definition to use. If not provided, the + version will be based on the newest version found in + 'definitions'. + """ + # Must be set first. + self.__doorpath = doorpath + self._target = svc_fmri + + Config.__init__( + self, definitions=definitions, overrides=overrides, version=version + ) + + def _validate_property_name(self, name): + """Raises an exception if property name is not valid for this + class. + """ + if not self.__name_re.match(name): + raise SMFInvalidPropertyNameError(name) + + def _validate_section_name(self, name): + """Raises an exception if section name is not valid for this + class. + """ + if not self.__name_re.match(name) or name in self.__reserved_sections: + raise SMFInvalidSectionNameError(name) + + def __read(self, overrides=misc.EmptyDict): + """Reads the configuration from the SMF FMRI specified at init + time. 
+ """ + + doorpath = "" + if self.__doorpath: + doorpath = "LIBSCF_DOORPATH={0} ".format(self.__doorpath) + + cmd = "{0}/usr/bin/svcprop -c -t {1}".format(doorpath, self._target) + p = subprocess.Popen( + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + out, err = p.communicate() + status, result = p.returncode, misc.force_str(out) + if status: + raise SMFReadError( + self._target, "{cmd}: {result}".format(**locals()) + ) + + cfgdata = {} + prop = None + for line in result.split("\n"): + if prop is None: + prop = line + else: + prop += line + + # Output from svcprop can be spread over multiple lines + # if a property value has embedded newlines. As such, + # look for the escape sequence at the end of the string + # to determine if output should be accumulated. + if re.search(r"(^|[^\\])(\\\\)*\\$", prop): + prop += "\n" + continue + + if len(prop) < 2: + continue + n, t, v = prop.split(" ", 2) + pg, pn = n.split("/", 1) + if pg in self.__reserved_sections: + # SMF-specific groups ignored. prop = None - for line in result.split("\n"): - if prop is None: - prop = line - else: - prop += line - - # Output from svcprop can be spread over multiple lines - # if a property value has embedded newlines. As such, - # look for the escape sequence at the end of the string - # to determine if output should be accumulated. - if re.search(r"(^|[^\\])(\\\\)*\\$", prop): - prop += "\n" - continue - - if len(prop) < 2: - continue - n, t, v = prop.split(' ', 2) - pg, pn = n.split('/', 1) - if pg in self.__reserved_sections: - # SMF-specific groups ignored. - prop = None - continue - - if (t == "astring" or t == "ustring") and v == '""': - v = '' - cfgdata.setdefault(pg, {}) - cfgdata[pg][pn] = v - prop = None - - # Reset to initial state to ensure the default set of properties - # and values exists so that any values not specified by the - # saved configuration or overrides will be correct. This must - # be done after the version is determined above so that the - # saved configuration data can be merged with the correct - # configuration definition. - Config.reset(self, overrides=overrides) - - # shlex.split() automatically does escaping for a list of values - # so no need to do it here. - for section, props in six.iteritems(cfgdata): - if section == "CONFIGURATION": - # Reserved for configuration file management. - continue - for prop, value in six.iteritems(props): - if section in overrides and \ - prop in overrides[section]: - continue - - propobj = self._get_matching_property(section, - prop) - if isinstance(propobj, PropList): - nvalue = [] - for v in shlex.split(value): - try: - if six.PY2: - v = v.encode( - "ascii") - else: - v = misc.force_str( - v, "ascii") - except ValueError: - try: - v = v.decode( - "utf-8") - except ValueError: - # Permit opaque - # data. It's - # up to each - # class whether - # to allow it. - pass - nvalue.append(v) - value = nvalue - else: - # Allow shlex to unescape the value, - # but rejoin all components as one. - value = ''.join(shlex.split(value)) - - # Finally, set the property value. - try: - propobj.value = value - except PropertyConfigError as e: - if hasattr(e, "section") and \ - not e.section: - e.section = section - raise - - def reset(self, overrides=misc.EmptyDict): - """Discards current configuration state and returns the - configuration object to its initial state. - - 'overrides' is an optional dictionary of property values - indexed by section name and property name. 
If provided, - it will be used to override any default values initially - assigned during reset. - """ - - # Reload the configuration. - self.__read(overrides=overrides) - - if not overrides: - # Unless there were overrides, ignore any initial - # values for the purpose of determining whether a - # write should occur. This isn't strictly correct, - # but is the desired behaviour in most cases. This - # also matches the historical behaviour of the - # configuration classes used in pkg(7). - self._dirty = False - - def write(self): - """Saves the current configuration object to the target - provided at initialization. - """ - - raise SMFWriteError(self._target, _("Writing configuration " - "data to SMF is not supported at this time.")) + continue + + if (t == "astring" or t == "ustring") and v == '""': + v = "" + cfgdata.setdefault(pg, {}) + cfgdata[pg][pn] = v + prop = None + + # Reset to initial state to ensure the default set of properties + # and values exists so that any values not specified by the + # saved configuration or overrides will be correct. This must + # be done after the version is determined above so that the + # saved configuration data can be merged with the correct + # configuration definition. + Config.reset(self, overrides=overrides) + + # shlex.split() automatically does escaping for a list of values + # so no need to do it here. + for section, props in six.iteritems(cfgdata): + if section == "CONFIGURATION": + # Reserved for configuration file management. + continue + for prop, value in six.iteritems(props): + if section in overrides and prop in overrides[section]: + continue + + propobj = self._get_matching_property(section, prop) + if isinstance(propobj, PropList): + nvalue = [] + for v in shlex.split(value): + try: + if six.PY2: + v = v.encode("ascii") + else: + v = misc.force_str(v, "ascii") + except ValueError: + try: + v = v.decode("utf-8") + except ValueError: + # Permit opaque + # data. It's + # up to each + # class whether + # to allow it. + pass + nvalue.append(v) + value = nvalue + else: + # Allow shlex to unescape the value, + # but rejoin all components as one. + value = "".join(shlex.split(value)) + + # Finally, set the property value. + try: + propobj.value = value + except PropertyConfigError as e: + if hasattr(e, "section") and not e.section: + e.section = section + raise + + def reset(self, overrides=misc.EmptyDict): + """Discards current configuration state and returns the + configuration object to its initial state. + + 'overrides' is an optional dictionary of property values + indexed by section name and property name. If provided, + it will be used to override any default values initially + assigned during reset. + """ + + # Reload the configuration. + self.__read(overrides=overrides) + + if not overrides: + # Unless there were overrides, ignore any initial + # values for the purpose of determining whether a + # write should occur. This isn't strictly correct, + # but is the desired behaviour in most cases. This + # also matches the historical behaviour of the + # configuration classes used in pkg(7). + self._dirty = False + + def write(self): + """Saves the current configuration object to the target + provided at initialization. + """ + + raise SMFWriteError( + self._target, + _( + "Writing configuration " + "data to SMF is not supported at this time." 
+ ), + ) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/cpiofile.py b/src/modules/cpiofile.py index 08c5a42d3..5ef4e613f 100644 --- a/src/modules/cpiofile.py +++ b/src/modules/cpiofile.py @@ -33,12 +33,13 @@ from __future__ import print_function -#--------- +# --------- # Imports -#--------- +# --------- import sys -if sys.version > '3': - long = int + +if sys.version > "3": + long = int import os import stat import time @@ -48,945 +49,991 @@ # cpio magic numbers # XXX matches actual cpio archives and /etc/magic, but not archives.h -CMN_ASC = 0o70701 # Cpio Magic Number for ASCII header -CMN_BIN = 0o70707 # Cpio Magic Number for Binary header -CMN_BBS = 0o143561 # Cpio Magic Number for Byte-Swap header -CMN_CRC = 0o70702 # Cpio Magic Number for CRC header -CMS_ASC = "070701" # Cpio Magic String for ASCII header -CMS_CHR = "070707" # Cpio Magic String for CHR (-c) header -CMS_CRC = "070702" # Cpio Magic String for CRC header -CMS_LEN = 6 # Cpio Magic String length +CMN_ASC = 0o70701 # Cpio Magic Number for ASCII header +CMN_BIN = 0o70707 # Cpio Magic Number for Binary header +CMN_BBS = 0o143561 # Cpio Magic Number for Byte-Swap header +CMN_CRC = 0o70702 # Cpio Magic Number for CRC header +CMS_ASC = "070701" # Cpio Magic String for ASCII header +CMS_CHR = "070707" # Cpio Magic String for CHR (-c) header +CMS_CRC = "070702" # Cpio Magic String for CRC header +CMS_LEN = 6 # Cpio Magic String length BLOCKSIZE = 512 + class CpioError(Exception): - """Base exception.""" - pass + """Base exception.""" + + pass + + class ExtractError(CpioError): - """General exception for extract errors.""" - pass + """General exception for extract errors.""" + + pass + + class ReadError(CpioError): - """Exception for unreadble cpio archives.""" - pass + """Exception for unreadble cpio archives.""" + + pass + + class CompressionError(CpioError): - """Exception for unavailable compression methods.""" - pass + """Exception for unavailable compression methods.""" + + pass + + class StreamError(CpioError): - """Exception for unsupported operations on stream-like CpioFiles.""" - pass + """Exception for unsupported operations on stream-like CpioFiles.""" -#--------------------------- + pass + + +# --------------------------- # internal stream interface -#--------------------------- +# --------------------------- class _LowLevelFile: - """Low-level file object. Supports reading and writing. - It is used instead of a regular file object for streaming - access. - """ - - def __init__(self, name, mode): - mode = { - "r": os.O_RDONLY, + """Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + """ + + def __init__(self, name, mode): + mode = { + "r": os.O_RDONLY, "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, - }[mode] - if hasattr(os, "O_BINARY"): - mode |= os.O_BINARY - self.fd = os.open(name, mode) + }[mode] + if hasattr(os, "O_BINARY"): + mode |= os.O_BINARY + self.fd = os.open(name, mode) - def close(self): - os.close(self.fd) + def close(self): + os.close(self.fd) - def read(self, size): - return os.read(self.fd, size) + def read(self, size): + return os.read(self.fd, size) + + def write(self, s): + os.write(self.fd, s) - def write(self, s): - os.write(self.fd, s) class _Stream: - """Class that serves as an adapter between CpioFile and - a stream-like object. The stream-like object only - needs to have a read() or write() method and is accessed - blockwise. 
Use of gzip or bzip2 compression is possible. - A stream-like object could be for example: sys.stdin, - sys.stdout, a socket, a tape device etc. - - _Stream is intended to be used only internally. + """Class that serves as an adapter between CpioFile and + a stream-like object. The stream-like object only + needs to have a read() or write() method and is accessed + blockwise. Use of gzip or bzip2 compression is possible. + A stream-like object could be for example: sys.stdin, + sys.stdout, a socket, a tape device etc. + + _Stream is intended to be used only internally. + """ + + def __init__(self, name, mode, type, fileobj, bufsize): + """Construct a _Stream object.""" + self._extfileobj = True + if fileobj is None: + fileobj = _LowLevelFile(name, mode) + self._extfileobj = False + + self.name = name or "" + self.mode = mode + self.type = type + self.fileobj = fileobj + self.bufsize = bufsize + self.buf = "" + self.pos = long(0) + self.closed = False + + if type == "gz": + try: + import zlib + except ImportError: + raise CompressionError("zlib module is not available") + self.zlib = zlib + self.crc = zlib.crc32("") + if mode == "r": + self._init_read_gz() + else: + self._init_write_gz() + + if type == "bz2": + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + if mode == "r": + self.dbuf = "" + self.cmp = bz2.BZ2Decompressor() + else: + self.cmp = bz2.BZ2Compressor() + + def __del__(self): + if not self.closed: + self.close() + + def _init_write_gz(self): + """Initialize for writing with gzip compression.""" + self.cmp = self.zlib.compressobj( + 9, + self.zlib.DEFLATED, + -self.zlib.MAX_WBITS, + self.zlib.DEF_MEM_LEVEL, + 0, + ) + timestamp = struct.pack(" self.bufsize: + self.fileobj.write(self.buf[: self.bufsize]) + self.buf = self.buf[self.bufsize :] + + def close(self): + """Close the _Stream object. No operation should be + done on it afterwards. """ + if self.closed: + return + + if self.mode == "w" and self.type != "cpio": + self.buf += self.cmp.flush() + if self.mode == "w" and self.buf: + self.fileobj.write(self.buf) + self.buf = "" + if self.type == "gz": + self.fileobj.write(struct.pack("= 0: + blocks, remainder = divmod(pos - self.pos, self.bufsize) + for i in range(blocks): + self.read(self.bufsize) + self.read(remainder) + else: + raise StreamError("seeking backwards is not allowed") + return self.pos + + def read(self, size=None): + """Return the next size number of bytes from the stream. + If size is not defined, return all bytes of the stream + up to EOF. + """ + if size is None: + t = [] + while True: + buf = self._read(self.bufsize) + if not buf: + break + t.append(buf) + buf = "".join(t) + else: + buf = self._read(size) + self.pos += len(buf) + # print("reading {0} bytes to {1} ({2})".format(size, self.pos, self.fileobj.tell())) + return buf + + def _read(self, size): + """Return size bytes from the stream.""" + if self.type == "cpio": + return self.__read(size) + + c = len(self.dbuf) + t = [self.dbuf] + while c < size: + buf = self.__read(self.bufsize) + if not buf: + break + buf = self.cmp.decompress(buf) + t.append(buf) + c += len(buf) + t = "".join(t) + self.dbuf = t[size:] + return t[:size] + + def __read(self, size): + """Return size bytes from stream. If internal buffer is empty, + read another block from the stream. 
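+        Short reads from the underlying file object are tolerated;
+        data accumulates in self.buf until size bytes are available
+        or EOF is reached.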
+ """ + c = len(self.buf) + t = [self.buf] + while c < size: + buf = self.fileobj.read(self.bufsize) + if not buf: + break + t.append(buf) + c += len(buf) + t = "".join(t) + self.buf = t[size:] + return t[:size] - def __init__(self, name, mode, type, fileobj, bufsize): - """Construct a _Stream object. - """ - self._extfileobj = True - if fileobj is None: - fileobj = _LowLevelFile(name, mode) - self._extfileobj = False - - self.name = name or "" - self.mode = mode - self.type = type - self.fileobj = fileobj - self.bufsize = bufsize - self.buf = "" - self.pos = long(0) - self.closed = False - - if type == "gz": - try: - import zlib - except ImportError: - raise CompressionError("zlib module is not available") - self.zlib = zlib - self.crc = zlib.crc32("") - if mode == "r": - self._init_read_gz() - else: - self._init_write_gz() - - if type == "bz2": - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") - if mode == "r": - self.dbuf = "" - self.cmp = bz2.BZ2Decompressor() - else: - self.cmp = bz2.BZ2Compressor() - - def __del__(self): - if not self.closed: - self.close() - - def _init_write_gz(self): - """Initialize for writing with gzip compression. - """ - self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, - -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0) - timestamp = struct.pack(" self.bufsize: - self.fileobj.write(self.buf[:self.bufsize]) - self.buf = self.buf[self.bufsize:] - - def close(self): - """Close the _Stream object. No operation should be - done on it afterwards. - """ - if self.closed: - return - - if self.mode == "w" and self.type != "cpio": - self.buf += self.cmp.flush() - if self.mode == "w" and self.buf: - self.fileobj.write(self.buf) - self.buf = "" - if self.type == "gz": - self.fileobj.write(struct.pack("= 0: - blocks, remainder = divmod(pos - self.pos, self.bufsize) - for i in range(blocks): - self.read(self.bufsize) - self.read(remainder) - else: - raise StreamError("seeking backwards is not allowed") - return self.pos - - def read(self, size=None): - """Return the next size number of bytes from the stream. - If size is not defined, return all bytes of the stream - up to EOF. - """ - if size is None: - t = [] - while True: - buf = self._read(self.bufsize) - if not buf: - break - t.append(buf) - buf = "".join(t) - else: - buf = self._read(size) - self.pos += len(buf) - # print("reading {0} bytes to {1} ({2})".format(size, self.pos, self.fileobj.tell())) - return buf - - def _read(self, size): - """Return size bytes from the stream. - """ - if self.type == "cpio": - return self.__read(size) - - c = len(self.dbuf) - t = [self.dbuf] - while c < size: - buf = self.__read(self.bufsize) - if not buf: - break - buf = self.cmp.decompress(buf) - t.append(buf) - c += len(buf) - t = "".join(t) - self.dbuf = t[size:] - return t[:size] - - def __read(self, size): - """Return size bytes from stream. If internal buffer is empty, - read another block from the stream. - """ - c = len(self.buf) - t = [self.buf] - while c < size: - buf = self.fileobj.read(self.bufsize) - if not buf: - break - t.append(buf) - c += len(buf) - t = "".join(t) - self.buf = t[size:] - return t[:size] # class _Stream -#------------------------ + +# ------------------------ # Extraction file object -#------------------------ +# ------------------------ class ExFileObject(object): - """File-like object for reading an archive member. - Is returned by CpioFile.extractfile(). + """File-like object for reading an archive member. 
+ Is returned by CpioFile.extractfile(). + """ + + def __init__(self, cpiofile, cpioinfo): + self.fileobj = cpiofile.fileobj + self.name = cpioinfo.name + self.mode = "r" + self.closed = False + self.offset = cpioinfo.offset_data + self.size = cpioinfo.size + self.pos = long(0) + self.linebuffer = "" + + def read(self, size=None): + if self.closed: + raise ValueError("file is closed") + self.fileobj.seek(self.offset + self.pos) + bytesleft = self.size - self.pos + if size is None: + bytestoread = bytesleft + else: + bytestoread = min(size, bytesleft) + self.pos += bytestoread + return self.fileobj.read(bytestoread) + + def readline(self, size=-1): + """Read a line with approx. size. If size is negative, + read a whole line. readline() and read() must not + be mixed up (!). """ - - def __init__(self, cpiofile, cpioinfo): - self.fileobj = cpiofile.fileobj - self.name = cpioinfo.name - self.mode = "r" - self.closed = False - self.offset = cpioinfo.offset_data - self.size = cpioinfo.size - self.pos = long(0) - self.linebuffer = "" - - def read(self, size=None): - if self.closed: - raise ValueError("file is closed") - self.fileobj.seek(self.offset + self.pos) - bytesleft = self.size - self.pos - if size is None: - bytestoread = bytesleft - else: - bytestoread = min(size, bytesleft) - self.pos += bytestoread - return self.fileobj.read(bytestoread) - - def readline(self, size=-1): - """Read a line with approx. size. If size is negative, - read a whole line. readline() and read() must not - be mixed up (!). - """ - if size < 0: - size = sys.maxsize - + if size < 0: + size = sys.maxsize + + nl = self.linebuffer.find("\n") + if nl >= 0: + nl = min(nl, size) + else: + size -= len(self.linebuffer) + while nl < 0 and size > 0: + buf = self.read(min(size, 100)) + if not buf: + break + self.linebuffer += buf + size -= len(buf) nl = self.linebuffer.find("\n") - if nl >= 0: - nl = min(nl, size) - else: - size -= len(self.linebuffer) - while (nl < 0 and size > 0): - buf = self.read(min(size, 100)) - if not buf: - break - self.linebuffer += buf - size -= len(buf) - nl = self.linebuffer.find("\n") - if nl == -1: - s = self.linebuffer - self.linebuffer = "" - return s - buf = self.linebuffer[:nl] - self.linebuffer = self.linebuffer[nl + 1:] - while buf[-1:] == "\r": - buf = buf[:-1] - return buf + "\n" - - def readlines(self): - """Return a list with all (following) lines. - """ - result = [] - while True: - line = self.readline() - if not line: break - result.append(line) - return result - - def tell(self): - """Return the current file position. - """ - return self.pos - - def seek(self, pos, whence=0): - """Seek to a position in the file. - """ + if nl == -1: + s = self.linebuffer self.linebuffer = "" - if whence == 0: - self.pos = min(max(pos, 0), self.size) - elif whence == 1: - if pos < 0: - self.pos = max(self.pos + pos, 0) - else: - self.pos = min(self.pos + pos, self.size) - elif whence == 2: - self.pos = max(min(self.size + pos, self.size), 0) - - def close(self): - """Close the file object. 
- """ - self.closed = True -#class ExFileObject - -#------------------ + return s + buf = self.linebuffer[:nl] + self.linebuffer = self.linebuffer[nl + 1 :] + while buf[-1:] == "\r": + buf = buf[:-1] + return buf + "\n" + + def readlines(self): + """Return a list with all (following) lines.""" + result = [] + while True: + line = self.readline() + if not line: + break + result.append(line) + return result + + def tell(self): + """Return the current file position.""" + return self.pos + + def seek(self, pos, whence=0): + """Seek to a position in the file.""" + self.linebuffer = "" + if whence == 0: + self.pos = min(max(pos, 0), self.size) + elif whence == 1: + if pos < 0: + self.pos = max(self.pos + pos, 0) + else: + self.pos = min(self.pos + pos, self.size) + elif whence == 2: + self.pos = max(min(self.size + pos, self.size), 0) + + def close(self): + """Close the file object.""" + self.closed = True + + +# class ExFileObject + + +# ------------------ # Exported Classes -#------------------ +# ------------------ class CpioInfo(object): - """Informational class which holds the details about an - archive member given by a cpio header block. - CpioInfo objects are returned by CpioFile.getmember(), - CpioFile.getmembers() and CpioFile.getcpioinfo() and are - usually created internally. + """Informational class which holds the details about an + archive member given by a cpio header block. + CpioInfo objects are returned by CpioFile.getmember(), + CpioFile.getmembers() and CpioFile.getcpioinfo() and are + usually created internally. + """ + + def __init__(self, name="", cpiofile=None): + """Construct a CpioInfo object. name is the optional name + of the member. """ - def __init__(self, name="", cpiofile=None): - """Construct a CpioInfo object. name is the optional name - of the member. - """ - - self.name = name - self.mode = 0o666 - self.uid = 0 - self.gid = 0 - self.size = 0 - self.mtime = 0 - self.chksum = 0 - self.type = "0" - self.linkname = "" - self.uname = "user" - self.gname = "group" - self.devmajor = 0 - self.devminor = 0 - self.prefix = "" - self.cpiofile = cpiofile - - self.offset = 0 - self.offset_data = 0 - self.padding = 1 - - def __repr__(self): - return "<{0} {1!r} at {2:#x}>".format( - self.__class__.__name__, self.name, id(self)) - - @classmethod - def frombuf(cls, buf, fileobj, cpiofile=None): - """Construct a CpioInfo object from a buffer. The buffer should - be at least 6 octets long to determine the type of archive. The - rest of the data will be read in on demand. 
- """ - cpioinfo = cls(cpiofile=cpiofile) - - # Read enough for the ASCII magic - if buf[:6] == CMS_ASC: - hdrtype = "CMS_ASC" - elif buf[:6] == CMS_CHR: - hdrtype = "CMS_CHR" - elif buf[:6] == CMS_CRC: - hdrtype = "CMS_CRC" - else: - b = struct.unpack("h", buf[:2])[0] - if b == CMN_ASC: - hdrtype = "CMN_ASC" - elif b == CMN_BIN: - hdrtype = "CMN_BIN" - elif b == CMN_BBS: - hdrtype = "CMN_BBS" - elif b == CMN_CRC: - hdrtype = "CMN_CRC" - else: - raise ValueError("invalid cpio header") - - if hdrtype == "CMN_BIN": - buf += fileobj.read(26 - len(buf)) - (magic, dev, inode, cpioinfo.mode, cpioinfo.uid, - cpioinfo.gid, nlink, rdev, cpioinfo.mtime, namesize, - cpioinfo.size) = struct.unpack("=hhHHHHhhihi", buf[:26]) - buf += fileobj.read(namesize) - cpioinfo.name = buf[26:26 + namesize - 1] - # Header is padded to halfword boundaries - cpioinfo.padding = 2 - cpioinfo.hdrsize = 26 + namesize + (namesize % 2) - buf += fileobj.read(namesize % 2) - elif hdrtype == "CMS_ASC": - buf += fileobj.read(110 - len(buf)) - cpioinfo.mode = int(buf[14:22], 16) - cpioinfo.uid = int(buf[22:30], 16) - cpioinfo.gid = int(buf[30:38], 16) - cpioinfo.mtime = int(buf[46:54], 16) - cpioinfo.size = int(buf[54:62], 16) - cpioinfo.devmajor = int(buf[62:70], 16) - cpioinfo.devminor = int(buf[70:78], 16) - namesize = int(buf[94:102], 16) - cpioinfo.chksum = int(buf[102:110], 16) - buf += fileobj.read(namesize) - cpioinfo.name = buf[110:110 + namesize - 1] - cpioinfo.hdrsize = 110 + namesize - # Pad to the nearest 4 byte block, 0-3 bytes. - cpioinfo.hdrsize += 4 - ((cpioinfo.hdrsize - 1) % 4) - 1 - buf += fileobj.read(cpioinfo.hdrsize - 110 - namesize) - cpioinfo.padding = 4 - else: - raise ValueError("unsupported cpio header") - - return cpioinfo - - def isreg(self): - return stat.S_ISREG(self.mode) - - # This isn't in tarfile, but it's too useful. It's required - # modifications to frombuf(), as well as CpioFile.next() to pass the - # CpioFile object in. I'm not sure that isn't poor OO style. - def extractfile(self): - """Return a file-like object which can be read to extract the contents. - """ - - if self.isreg(): - return ExFileObject(self.cpiofile, self) - else: - return None - -class CpioFile(object): - """The CpioFile Class provides an interface to cpio archives. + self.name = name + self.mode = 0o666 + self.uid = 0 + self.gid = 0 + self.size = 0 + self.mtime = 0 + self.chksum = 0 + self.type = "0" + self.linkname = "" + self.uname = "user" + self.gname = "group" + self.devmajor = 0 + self.devminor = 0 + self.prefix = "" + self.cpiofile = cpiofile + + self.offset = 0 + self.offset_data = 0 + self.padding = 1 + + def __repr__(self): + return "<{0} {1!r} at {2:#x}>".format( + self.__class__.__name__, self.name, id(self) + ) + + @classmethod + def frombuf(cls, buf, fileobj, cpiofile=None): + """Construct a CpioInfo object from a buffer. The buffer should + be at least 6 octets long to determine the type of archive. The + rest of the data will be read in on demand. 
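+        Only the old binary (CMN_BIN) and new ASCII (CMS_ASC) header
+        formats are currently parsed; other recognised magic values
+        raise ValueError as unsupported.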
""" + cpioinfo = cls(cpiofile=cpiofile) + + # Read enough for the ASCII magic + if buf[:6] == CMS_ASC: + hdrtype = "CMS_ASC" + elif buf[:6] == CMS_CHR: + hdrtype = "CMS_CHR" + elif buf[:6] == CMS_CRC: + hdrtype = "CMS_CRC" + else: + b = struct.unpack("h", buf[:2])[0] + if b == CMN_ASC: + hdrtype = "CMN_ASC" + elif b == CMN_BIN: + hdrtype = "CMN_BIN" + elif b == CMN_BBS: + hdrtype = "CMN_BBS" + elif b == CMN_CRC: + hdrtype = "CMN_CRC" + else: + raise ValueError("invalid cpio header") + + if hdrtype == "CMN_BIN": + buf += fileobj.read(26 - len(buf)) + ( + magic, + dev, + inode, + cpioinfo.mode, + cpioinfo.uid, + cpioinfo.gid, + nlink, + rdev, + cpioinfo.mtime, + namesize, + cpioinfo.size, + ) = struct.unpack("=hhHHHHhhihi", buf[:26]) + buf += fileobj.read(namesize) + cpioinfo.name = buf[26 : 26 + namesize - 1] + # Header is padded to halfword boundaries + cpioinfo.padding = 2 + cpioinfo.hdrsize = 26 + namesize + (namesize % 2) + buf += fileobj.read(namesize % 2) + elif hdrtype == "CMS_ASC": + buf += fileobj.read(110 - len(buf)) + cpioinfo.mode = int(buf[14:22], 16) + cpioinfo.uid = int(buf[22:30], 16) + cpioinfo.gid = int(buf[30:38], 16) + cpioinfo.mtime = int(buf[46:54], 16) + cpioinfo.size = int(buf[54:62], 16) + cpioinfo.devmajor = int(buf[62:70], 16) + cpioinfo.devminor = int(buf[70:78], 16) + namesize = int(buf[94:102], 16) + cpioinfo.chksum = int(buf[102:110], 16) + buf += fileobj.read(namesize) + cpioinfo.name = buf[110 : 110 + namesize - 1] + cpioinfo.hdrsize = 110 + namesize + # Pad to the nearest 4 byte block, 0-3 bytes. + cpioinfo.hdrsize += 4 - ((cpioinfo.hdrsize - 1) % 4) - 1 + buf += fileobj.read(cpioinfo.hdrsize - 110 - namesize) + cpioinfo.padding = 4 + else: + raise ValueError("unsupported cpio header") + + return cpioinfo + + def isreg(self): + return stat.S_ISREG(self.mode) + + # This isn't in tarfile, but it's too useful. It's required + # modifications to frombuf(), as well as CpioFile.next() to pass the + # CpioFile object in. I'm not sure that isn't poor OO style. + def extractfile(self): + """Return a file-like object which can be read to extract the contents.""" + + if self.isreg(): + return ExFileObject(self.cpiofile, self) + else: + return None - fileobject = ExFileObject - - def __init__(self, name=None, mode="r", fileobj=None, cfobj=None): - """Open an (uncompressed) cpio archive `name'. `mode' is either 'r' to - read from an existing archive, 'a' to append data to an existing - file or 'w' to create a new file overwriting an existing one. `mode' - defaults to 'r'. - If `fileobj' is given, it is used for reading or writing data. If it - can be determined, `mode' is overridden by `fileobj's mode. - `fileobj' is not closed, when CpioFile is closed. - """ - self.name = name - - if len(mode) > 1 or mode not in "raw": - raise ValueError("mode must be 'r', 'a' or 'w'") - self._mode = mode - self.mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] - - if not fileobj and not cfobj: - fileobj = open(self.name, self.mode) - self._extfileobj = False - else: - # Copy constructor: just copy fileobj over and reset the - # _Stream object's idea of where we are back to the - # beginning. Everything else will be reset normally. - # XXX clear closed flag? 
- if cfobj: - fileobj = cfobj.fileobj - fileobj.pos = 0 - if self.name is None and hasattr(fileobj, "name"): - self.name = fileobj.name - if hasattr(fileobj, "mode"): - self.mode = fileobj.mode - self._extfileobj = True - self.fileobj = fileobj - - # Init datastructures - self.closed = False - self.members = [] # list of members as CpioInfo objects - self._loaded = False # flag if all members have been read - self.offset = long(0) # current position in the archive file - - if self._mode == "r": - self.firstmember = None - self.firstmember = next(self) - - if self._mode == "a": - # Move to the end of the archive, - # before the first empty block. - self.firstmember = None - while True: - try: - cpioinfo = next(self) - except ReadError: - self.fileobj.seek(0) - break - if cpioinfo is None: - self.fileobj.seek(- BLOCKSIZE, 1) - break - - if self._mode in "aw": - self._loaded = True - - #-------------------------------------------------------------------------- - # Below are the classmethods which act as alternate constructors to the - # CpioFile class. The open() method is the only one that is needed for - # public use; it is the "super"-constructor and is able to select an - # adequate "sub"-constructor for a particular compression using the mapping - # from OPEN_METH. - # - # This concept allows one to subclass CpioFile without losing the comfort of - # the super-constructor. A sub-constructor is registered and made available - # by adding it to the mapping in OPEN_METH. - @classmethod - def open(cls, name=None, mode="r", fileobj=None, bufsize=20*512): - """Open a cpio archive for reading, writing or appending. Return - an appropriate CpioFile class. - - mode: - 'r' open for reading with transparent compression - 'r:' open for reading exclusively uncompressed - 'r:gz' open for reading with gzip compression - 'r:bz2' open for reading with bzip2 compression - 'a' or 'a:' open for appending - 'w' or 'w:' open for writing without compression - 'w:gz' open for writing with gzip compression - 'w:bz2' open for writing with bzip2 compression - 'r|' open an uncompressed stream of cpio blocks for reading - 'r|gz' open a gzip compressed stream of cpio blocks - 'r|bz2' open a bzip2 compressed stream of cpio blocks - 'w|' open an uncompressed stream for writing - 'w|gz' open a gzip compressed stream for writing - 'w|bz2' open a bzip2 compressed stream for writing - """ - - if not name and not fileobj: - raise ValueError("nothing to open") - - if ":" in mode: - filemode, comptype = mode.split(":", 1) - filemode = filemode or "r" - comptype = comptype or "cpio" - - # Select the *open() function according to - # given compression. - if comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - else: - raise CompressionError("unknown compression type {0!r}".format(comptype)) - return func(name, filemode, fileobj) - - elif "|" in mode: - filemode, comptype = mode.split("|", 1) - filemode = filemode or "r" - comptype = comptype or "cpio" - - if filemode not in "rw": - raise ValueError("mode must be 'r' or 'w'") - - t = cls(name, filemode, - _Stream(name, filemode, comptype, fileobj, bufsize)) - t._extfileobj = False - return t - - elif mode == "r": - # Find out which *open() is appropriate for opening the file. 
- for comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - try: - return func(name, "r", fileobj) - except (ReadError, CompressionError): - continue - raise ReadError("file could not be opened successfully") - - elif mode in "aw": - return cls.cpioopen(name, mode, fileobj) - - raise ValueError("undiscernible mode") - - @classmethod - def cpioopen(cls, name, mode="r", fileobj=None): - """Open uncompressed cpio archive name for reading or writing. - """ - if len(mode) > 1 or mode not in "raw": - raise ValueError("mode must be 'r', 'a' or 'w'") - return cls(name, mode, fileobj) - - @classmethod - def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9): - """Open gzip compressed cpio archive name for reading or writing. - Appending is not allowed. - """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'") - - try: - import gzip - gzip.GzipFile - except (ImportError, AttributeError): - raise CompressionError("gzip module is not available") - - pre, ext = os.path.splitext(name) - pre = os.path.basename(pre) - if ext == ".gz": - ext = "" - cpioname = pre + ext - - if fileobj is None: - fileobj = open(name, mode + "b") - - if mode != "r": - name = tarname +class CpioFile(object): + """The CpioFile Class provides an interface to cpio archives.""" + + fileobject = ExFileObject + + def __init__(self, name=None, mode="r", fileobj=None, cfobj=None): + """Open an (uncompressed) cpio archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when CpioFile is closed. + """ + self.name = name + + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + self._mode = mode + self.mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] + + if not fileobj and not cfobj: + fileobj = open(self.name, self.mode) + self._extfileobj = False + else: + # Copy constructor: just copy fileobj over and reset the + # _Stream object's idea of where we are back to the + # beginning. Everything else will be reset normally. + # XXX clear closed flag? + if cfobj: + fileobj = cfobj.fileobj + fileobj.pos = 0 + if self.name is None and hasattr(fileobj, "name"): + self.name = fileobj.name + if hasattr(fileobj, "mode"): + self.mode = fileobj.mode + self._extfileobj = True + self.fileobj = fileobj + + # Init datastructures + self.closed = False + self.members = [] # list of members as CpioInfo objects + self._loaded = False # flag if all members have been read + self.offset = long(0) # current position in the archive file + + if self._mode == "r": + self.firstmember = None + self.firstmember = next(self) + + if self._mode == "a": + # Move to the end of the archive, + # before the first empty block. + self.firstmember = None + while True: try: - t = cls.cpioopen(cpioname, mode, - gzip.GzipFile(name, mode, compresslevel, - fileobj)) - except IOError: - raise ReadError("not a gzip file") - t._extfileobj = False - return t - - @classmethod - def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9): - """Open bzip2 compressed cpio archive name for reading or writing. - Appending is not allowed. 
- """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'.") + cpioinfo = next(self) + except ReadError: + self.fileobj.seek(0) + break + if cpioinfo is None: + self.fileobj.seek(-BLOCKSIZE, 1) + break + + if self._mode in "aw": + self._loaded = True + + # -------------------------------------------------------------------------- + # Below are the classmethods which act as alternate constructors to the + # CpioFile class. The open() method is the only one that is needed for + # public use; it is the "super"-constructor and is able to select an + # adequate "sub"-constructor for a particular compression using the mapping + # from OPEN_METH. + # + # This concept allows one to subclass CpioFile without losing the comfort of + # the super-constructor. A sub-constructor is registered and made available + # by adding it to the mapping in OPEN_METH. + @classmethod + def open(cls, name=None, mode="r", fileobj=None, bufsize=20 * 512): + """Open a cpio archive for reading, writing or appending. Return + an appropriate CpioFile class. + + mode: + 'r' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'a' or 'a:' open for appending + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + 'r|' open an uncompressed stream of cpio blocks for reading + 'r|gz' open a gzip compressed stream of cpio blocks + 'r|bz2' open a bzip2 compressed stream of cpio blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + """ + if not name and not fileobj: + raise ValueError("nothing to open") + + if ":" in mode: + filemode, comptype = mode.split(":", 1) + filemode = filemode or "r" + comptype = comptype or "cpio" + + # Select the *open() function according to + # given compression. + if comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) + else: + raise CompressionError( + "unknown compression type {0!r}".format(comptype) + ) + return func(name, filemode, fileobj) + + elif "|" in mode: + filemode, comptype = mode.split("|", 1) + filemode = filemode or "r" + comptype = comptype or "cpio" + + if filemode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + t = cls( + name, + filemode, + _Stream(name, filemode, comptype, fileobj, bufsize), + ) + t._extfileobj = False + return t + + elif mode == "r": + # Find out which *open() is appropriate for opening the file. + for comptype in cls.OPEN_METH: + func = getattr(cls, cls.OPEN_METH[comptype]) try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") + return func(name, "r", fileobj) + except (ReadError, CompressionError): + continue + raise ReadError("file could not be opened successfully") + + elif mode in "aw": + return cls.cpioopen(name, mode, fileobj) + + raise ValueError("undiscernible mode") + + @classmethod + def cpioopen(cls, name, mode="r", fileobj=None): + """Open uncompressed cpio archive name for reading or writing.""" + if len(mode) > 1 or mode not in "raw": + raise ValueError("mode must be 'r', 'a' or 'w'") + return cls(name, mode, fileobj) + + @classmethod + def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9): + """Open gzip compressed cpio archive name for reading or writing. + Appending is not allowed. 
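+        The archive is accessed through gzip.GzipFile; the logical
+        archive name is the basename of `name' with any ".gz" suffix
+        stripped.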
+ """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'") + + try: + import gzip + + gzip.GzipFile + except (ImportError, AttributeError): + raise CompressionError("gzip module is not available") + + pre, ext = os.path.splitext(name) + pre = os.path.basename(pre) + if ext == ".gz": + ext = "" + cpioname = pre + ext + + if fileobj is None: + fileobj = open(name, mode + "b") + + if mode != "r": + name = tarname + + try: + t = cls.cpioopen( + cpioname, + mode, + gzip.GzipFile(name, mode, compresslevel, fileobj), + ) + except IOError: + raise ReadError("not a gzip file") + t._extfileobj = False + return t + + @classmethod + def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9): + """Open bzip2 compressed cpio archive name for reading or writing. + Appending is not allowed. + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'.") + + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + + pre, ext = os.path.splitext(name) + pre = os.path.basename(pre) + if ext == ".bz2": + ext = "" + cpioname = pre + ext + + if fileobj is not None: + raise ValueError("no support for external file objects") + + try: + t = cls.cpioopen( + cpioname, + mode, + bz2.BZ2File(name, mode, compresslevel=compresslevel), + ) + except IOError: + raise ReadError("not a bzip2 file") + t._extfileobj = False + return t + + @classmethod + def p7zopen(cls, name, mode="r", fileobj=None): + """Open 7z compressed cpio archive name for reading, writing. + + Appending is not allowed + """ + if len(mode) > 1 or mode not in "rw": + raise ValueError("mode must be 'r' or 'w'.") + + pre, ext = os.path.splitext(name) + pre = os.path.basename(pre) + if ext == ".7z": + ext = "" + cpioname = pre + ext + + try: + # To extract: 7z e -so + # To create an archive: 7z a -si + cmd = "7z {0} -{1} {2}".format( + {"r": "e", "w": "a"}[mode], {"r": "so", "w": "si"}[mode], name + ) + p = subprocess.Popen( + cmd.split(), + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + pobj = p.stdout + if mode == "w": + pobj = p.stdin + + comptype = "cpio" + bufsize = 20 * 512 + + obj = _Stream(cpioname, mode, comptype, pobj, bufsize) + t = cls.cpioopen(cpioname, mode, obj) + except IOError: + raise ReadError("read/write via 7z failed") + t._extfileobj = False + return t + + # All *open() methods are registered here. + OPEN_METH = { + "cpio": "cpioopen", # uncompressed + "gz": "gzopen", # gzip compressed + "bz2": "bz2open", # bzip2 compressed + "p7z": "p7zopen", # 7z compressed + } + + def getmember(self, name): + """Return a CpioInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurence is assumed to be the + most up-to-date version. + """ + cpioinfo = self._getmember(name) + if cpioinfo is None: + raise KeyError("filename {0!r} not found".format(name)) + return cpioinfo + + def getmembers(self): + """Return the members of the archive as a list of CpioInfo objects. The + list has the same order as the members in the archive. + """ + self._check() + if not self._loaded: # if we want to obtain a list of + self._load() # all members, we first have to + # scan the whole archive. 
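+            # (_load() keeps calling next() until the TRAILER!!!
+            # entry or EOF is reached, recording each member along
+            # the way.)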
+ return self.members + + def __next__(self): + self._check("ra") + if self.firstmember is not None: + m = self.firstmember + self.firstmember = None + return m + + self.fileobj.seek(self.offset) + while True: + # Read in enough for frombuf() to be able to determine + # what kind of archive it is. It will have to read the + # rest of the header. + buf = self.fileobj.read(6) + if not buf: + return None + try: + cpioinfo = CpioInfo.frombuf(buf, self.fileobj, self) + except ValueError as e: + if self.offset == 0: + raise ReadError("empty, unreadable or compressed file") + return None + break + + # if cpioinfo.chksum != calc_chksum(buf): + # self._dbg(1, "cpiofile: Bad Checksum {0!r}".format(cpioinfo.name)) + + cpioinfo.offset = self.offset + + cpioinfo.offset_data = self.offset + cpioinfo.hdrsize + if cpioinfo.isreg() or cpioinfo.type not in ( + 0, + ): # XXX SUPPORTED_TYPES? + self.offset += cpioinfo.hdrsize + cpioinfo.size + if self.offset % cpioinfo.padding != 0: + self.offset += cpioinfo.padding - ( + self.offset % cpioinfo.padding + ) + + if cpioinfo.name == "TRAILER!!!": + return None + + self.members.append(cpioinfo) + return cpioinfo + + next = __next__ + + def extractfile(self, member): + self._check("r") + + if isinstance(member, CpioInfo): + cpioinfo = member + else: + cpioinfo = self.getmember(member) + + if cpioinfo.isreg(): + return self.fileobject(self, cpioinfo) + # XXX deal with other types + else: + return None + + def _block(self, count): + blocks, remainder = divmod(count, BLOCKSIZE) + if remainder: + blocks += 1 + return blocks * BLOCKSIZE + + def _getmember(self, name, cpioinfo=None): + members = self.getmembers() + + if cpioinfo is None: + end = len(members) + else: + end = members.index(cpioinfo) + + for i in range(end - 1, -1, -1): + if name == members[i].name: + return members[i] + + def _load(self): + while True: + cpioinfo = next(self) + if cpioinfo is None: + break + self._loaded = True + + def _check(self, mode=None): + if self.closed: + raise IOError("{0} is closed".format(self.__class__.__name__)) + if mode is not None and self._mode not in mode: + raise IOError("bad operation for mode {0!r}".format(self._mode)) + + def __iter__(self): + if self._loaded: + return iter(self.members) + else: + return CpioIter(self) + + def find_next_archive(self, padding=512): + """Find the next cpio archive glommed on to the end of the current one. + + Some applications, like Solaris package datastreams, concatenate + multiple cpio archives together, separated by a bit of padding. + This routine puts all the file pointers in position to start + reading from the next archive, which can be done by creating a + new CpioFile object given the original one as an argument (after + this routine is called). + """ - pre, ext = os.path.splitext(name) - pre = os.path.basename(pre) - if ext == ".bz2": - ext = "" - cpioname = pre + ext + bytes = 0 + if self.fileobj.tell() % padding != 0: + bytes = padding - self.fileobj.tell() % padding + self.fileobj.seek(self.fileobj.tell() + bytes) + self.offset += bytes - if fileobj is not None: - raise ValueError("no support for external file objects") + def get_next_archive(self, padding=512): + """Return the next cpio archive glommed on to the end of the current one. 
- try: - t = cls.cpioopen(cpioname, mode, - bz2.BZ2File(name, mode, compresslevel=compresslevel)) - except IOError: - raise ReadError("not a bzip2 file") - t._extfileobj = False - return t - - @classmethod - def p7zopen(cls, name, mode="r", fileobj=None): - """Open 7z compressed cpio archive name for reading, writing. - - Appending is not allowed - """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'.") - - pre, ext = os.path.splitext(name) - pre = os.path.basename(pre) - if ext == ".7z": - ext = "" - cpioname = pre + ext + Return the CpioFile object based on the repositioning done by + find_next_archive(). + """ - try: - # To extract: 7z e -so - # To create an archive: 7z a -si - cmd = "7z {0} -{1} {2}".format( - {'r':'e', 'w':'a'}[mode], - {'r':'so', 'w':'si'}[mode], - name) - p = subprocess.Popen(cmd.split(), - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - pobj = p.stdout - if mode == "w": - pobj = p.stdin - - comptype = "cpio" - bufsize = 20*512 - - obj = _Stream(cpioname, mode, comptype, pobj, bufsize) - t = cls.cpioopen(cpioname, mode, obj) - except IOError: - raise ReadError("read/write via 7z failed") - t._extfileobj = False - return t - - # All *open() methods are registered here. - OPEN_METH = { - "cpio": "cpioopen", # uncompressed - "gz": "gzopen", # gzip compressed - "bz2": "bz2open", # bzip2 compressed - "p7z": "p7zopen" # 7z compressed - } - - def getmember(self, name): - """Return a CpioInfo object for member `name'. If `name' can not be - found in the archive, KeyError is raised. If a member occurs more - than once in the archive, its last occurence is assumed to be the - most up-to-date version. - """ - cpioinfo = self._getmember(name) - if cpioinfo is None: - raise KeyError("filename {0!r} not found".format(name)) - return cpioinfo - - def getmembers(self): - """Return the members of the archive as a list of CpioInfo objects. The - list has the same order as the members in the archive. - """ - self._check() - if not self._loaded: # if we want to obtain a list of - self._load() # all members, we first have to - # scan the whole archive. - return self.members - - def __next__(self): - self._check("ra") - if self.firstmember is not None: - m = self.firstmember - self.firstmember = None - return m - - self.fileobj.seek(self.offset) - while True: - # Read in enough for frombuf() to be able to determine - # what kind of archive it is. It will have to read the - # rest of the header. - buf = self.fileobj.read(6) - if not buf: - return None - try: - cpioinfo = CpioInfo.frombuf(buf, self.fileobj, self) - except ValueError as e: - if self.offset == 0: - raise ReadError("empty, unreadable or compressed file") - return None - break - - # if cpioinfo.chksum != calc_chksum(buf): - # self._dbg(1, "cpiofile: Bad Checksum {0!r}".format(cpioinfo.name)) - - cpioinfo.offset = self.offset - - cpioinfo.offset_data = self.offset + cpioinfo.hdrsize - if cpioinfo.isreg() or cpioinfo.type not in (0,): # XXX SUPPORTED_TYPES? 
- self.offset += cpioinfo.hdrsize + cpioinfo.size - if self.offset % cpioinfo.padding != 0: - self.offset += cpioinfo.padding - \ - (self.offset % cpioinfo.padding) - - if cpioinfo.name == "TRAILER!!!": - return None - - self.members.append(cpioinfo) - return cpioinfo - - next = __next__ - - def extractfile(self, member): - self._check("r") - - if isinstance(member, CpioInfo): - cpioinfo = member - else: - cpioinfo = self.getmember(member) - - if cpioinfo.isreg(): - return self.fileobject(self, cpioinfo) - # XXX deal with other types - else: - return None - - def _block(self, count): - blocks, remainder = divmod(count, BLOCKSIZE) - if remainder: - blocks += 1 - return blocks * BLOCKSIZE - - def _getmember(self, name, cpioinfo=None): - members = self.getmembers() + self.find_next_archive(padding) + return CpioFile(cfobj=self) - if cpioinfo is None: - end = len(members) - else: - end = members.index(cpioinfo) - - for i in range(end - 1, -1, -1): - if name == members[i].name: - return members[i] - - def _load(self): - while True: - cpioinfo = next(self) - if cpioinfo is None: - break - self._loaded = True - - def _check(self, mode=None): - if self.closed: - raise IOError("{0} is closed".format( - self.__class__.__name__)) - if mode is not None and self._mode not in mode: - raise IOError("bad operation for mode {0!r}".format( - self._mode)) - - def __iter__(self): - if self._loaded: - return iter(self.members) - else: - return CpioIter(self) - - def find_next_archive(self, padding=512): - """Find the next cpio archive glommed on to the end of the current one. - - Some applications, like Solaris package datastreams, concatenate - multiple cpio archives together, separated by a bit of padding. - This routine puts all the file pointers in position to start - reading from the next archive, which can be done by creating a - new CpioFile object given the original one as an argument (after - this routine is called). - """ - - bytes = 0 - if self.fileobj.tell() % padding != 0: - bytes = padding - self.fileobj.tell() % padding - self.fileobj.seek(self.fileobj.tell() + bytes) - self.offset += bytes - - def get_next_archive(self, padding=512): - """Return the next cpio archive glommed on to the end of the current one. - - Return the CpioFile object based on the repositioning done by - find_next_archive(). 
- """ - - self.find_next_archive(padding) - return CpioFile(cfobj=self) class CpioIter: - def __init__(self, cpiofile): - self.cpiofile = cpiofile - self.index = 0 - - def __iter__(self): - return self - - def __next__(self): - if not self.cpiofile._loaded: - cpioinfo = next(self.cpiofile) - if not cpioinfo: - self.cpiofile._loaded = True - raise StopIteration - else: - try: - cpioinfo = self.cpiofile.members[self.index] - except IndexError: - raise StopIteration - self.index += 1 - return cpioinfo - - next = __next__ + def __init__(self, cpiofile): + self.cpiofile = cpiofile + self.index = 0 + + def __iter__(self): + return self + + def __next__(self): + if not self.cpiofile._loaded: + cpioinfo = next(self.cpiofile) + if not cpioinfo: + self.cpiofile._loaded = True + raise StopIteration + else: + try: + cpioinfo = self.cpiofile.members[self.index] + except IndexError: + raise StopIteration + self.index += 1 + return cpioinfo + + next = __next__ + def is_cpiofile(name): + magic = open(name).read(CMS_LEN) - magic = open(name).read(CMS_LEN) + if magic in (CMS_ASC, CMS_CHR, CMS_CRC): + return True + elif struct.unpack("h", magic[:2])[0] in ( + CMN_ASC, + CMN_BIN, + CMN_BBS, + CMN_CRC, + ): + return True - if magic in (CMS_ASC, CMS_CHR, CMS_CRC): - return True - elif struct.unpack("h", magic[:2])[0] in \ - (CMN_ASC, CMN_BIN, CMN_BBS, CMN_CRC): - return True + return False - return False if __name__ == "__main__": - print(is_cpiofile(sys.argv[1])) - - cf = CpioFile.open(sys.argv[1]) - print("cpiofile is:", cf) - - for ci in cf: - print("cpioinfo is:", ci) - print(" mode: {:04o}".format(ci.mode)) - print(" uid:", ci.uid) - print(" gid:", ci.gid) - print(" mtime:", ci.mtime, "({0})".format( - time.ctime(ci.mtime))) - print(" size:", ci.size) - print(" name:", ci.name) - # f = cf.extractfile(ci) - # for l in f.readlines(): - # print(l, end=" ") - # f.close() + print(is_cpiofile(sys.argv[1])) + + cf = CpioFile.open(sys.argv[1]) + print("cpiofile is:", cf) + + for ci in cf: + print("cpioinfo is:", ci) + print(" mode: {:04o}".format(ci.mode)) + print(" uid:", ci.uid) + print(" gid:", ci.gid) + print(" mtime:", ci.mtime, "({0})".format(time.ctime(ci.mtime))) + print(" size:", ci.size) + print(" name:", ci.name) + # f = cf.extractfile(ci) + # for l in f.readlines(): + # print(l, end=" ") + # f.close() # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/dependency.py b/src/modules/dependency.py index 215aa35bf..f115a030b 100644 --- a/src/modules/dependency.py +++ b/src/modules/dependency.py @@ -28,37 +28,34 @@ OPTIONAL = 1 INCORPORATE = 10 + class Dependency(object): - """A Dependency object is a relationship between one Package and - another. It is a bidirectional expression. + """A Dependency object is a relationship between one Package and + another. It is a bidirectional expression. 
+ + A package may require a minimum version of another package.""" - A package may require a minimum version of another package.""" + def __init__(self, host_pkg_fmri, req_pkg_fmri, type=REQUIRE): + self.host_pkg_fmri = host_pkg_fmri + self.req_pkg_fmri = req_pkg_fmri - def __init__(self, host_pkg_fmri, req_pkg_fmri, type = REQUIRE): - self.host_pkg_fmri = host_pkg_fmri - self.req_pkg_fmri = req_pkg_fmri + assert type == REQUIRE or type == INCORPORATE or type == OPTIONAL - assert type == REQUIRE \ - or type == INCORPORATE \ - or type == OPTIONAL + self.type = type - self.type = type + def satisfied(self, pkg_fmri): + # compare pkg_fmri to req_pkg_fmri + # compare versions + return False - def satisfied(self, pkg_fmri): - # compare pkg_fmri to req_pkg_fmri - # compare versions - return False + def __repr__(self): + if self.type == REQUIRE: + return "{0} => {1}".format(self.host_pkg_fmri, self.req_pkg_fmri) + elif self.type == OPTIONAL: + return "{0} o> {1}".format(self.host_pkg_fmri, self.req_pkg_fmri) + elif self.type == INCORPORATE: + return "{0} >> {1}".format(self.host_pkg_fmri, self.req_pkg_fmri) - def __repr__(self): - if self.type == REQUIRE: - return "{0} => {1}".format( - self.host_pkg_fmri, self.req_pkg_fmri) - elif self.type == OPTIONAL: - return "{0} o> {1}".format( - self.host_pkg_fmri, self.req_pkg_fmri) - elif self.type == INCORPORATE: - return "{0} >> {1}".format( - self.host_pkg_fmri, self.req_pkg_fmri) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/depotcontroller.py b/src/modules/depotcontroller.py index 88afabb16..543410479 100755 --- a/src/modules/depotcontroller.py +++ b/src/modules/depotcontroller.py @@ -40,560 +40,573 @@ import pkg.pkgsubprocess as subprocess import pkg.server.repository as sr + class DepotStateException(Exception): + def __init__(self, reason): + Exception.__init__(self, reason) - def __init__(self, reason): - Exception.__init__(self, reason) class DepotController(object): + HALTED = 0 + STARTING = 1 + RUNNING = 2 + + def __init__(self, wrapper_start=None, wrapper_end="", env=None): + self.__add_content = False + self.__auto_port = True + self.__cfg_file = None + self.__debug_features = {} + self.__depot_handle = None + self.__depot_path = "/usr/lib/pkg.depotd" + self.__depot_content_root = None + self.__dir = None + self.__disable_ops = None + self.__exit_ready = False + self.__file_root = None + self.__logpath = "/tmp/depot.log" + self.__mirror = False + self.__output = None + self.__address = None + self.__port = -1 + self.__props = {} + self.__readonly = False + self.__rebuild = False + self.__refresh_index = False + self.__state = self.HALTED + self.__writable_root = None + self.__sort_file_max_size = None + self.__ssl_dialog = None + self.__ssl_cert_file = None + self.__ssl_key_file = None + self.__starttime = 0 + self.__wrapper_start = [] + self.__wrapper_end = wrapper_end + self.__env = {} + self.__nasty = None + self.__nasty_sleep = None + if wrapper_start: + self.__wrapper_start = wrapper_start + if env: + self.__env = env + # + # Enable special unit-test depot mode in which it doesn't + # do its normal double-fork, providing us good control + # over the process. 
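+        # (pkg.depotd is expected to check for this variable in its
+        # environment at startup.)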
+ # + self.__env["PKGDEPOT_CONTROLLER"] = "1" + return + + def get_wrapper(self): + return self.__wrapper_start, self.__wrapper_end + + def set_wrapper(self, start, end): + self.__wrapper_start = start + self.__wrapper_end = end + + def unset_wrapper(self): + self.__wrapper_start = [] + self.__wrapper_end = "" + + def set_depotd_path(self, path): + self.__depot_path = path + + def set_depotd_content_root(self, path): + self.__depot_content_root = path + + def get_depotd_content_root(self): + return self.__depot_content_root + + def set_auto_port(self): + self.__auto_port = True + + def set_address(self, address): + self.__address = address + + def get_address(self): + return self.__address + + def set_port(self, port): + self.__auto_port = False + self.__port = port + + def get_port(self): + return self.__port + + def clear_property(self, section, prop): + del self.__props[section][prop] + + def set_property(self, section, prop, value): + self.__props.setdefault(section, {}) + self.__props[section][prop] = value + + def get_property(self, section, prop): + return self.__props.get(section, {}).get(prop) + + def set_file_root(self, f_root): + self.__file_root = f_root + + def get_file_root(self): + return self.__file_root + + def set_repodir(self, repodir): + self.__dir = repodir + + def get_repodir(self): + return self.__dir + + def get_repo(self, auto_create=False): + if auto_create: + try: + sr.repository_create(self.__dir) + except sr.RepositoryExistsError: + # Already exists, nothing to do. + pass + return sr.Repository( + cfgpathname=self.__cfg_file, + root=self.__dir, + writable_root=self.__writable_root, + ) - HALTED = 0 - STARTING = 1 - RUNNING = 2 - - def __init__(self, wrapper_start=None, wrapper_end="", env=None): - self.__add_content = False - self.__auto_port = True - self.__cfg_file = None - self.__debug_features = {} - self.__depot_handle = None - self.__depot_path = "/usr/lib/pkg.depotd" - self.__depot_content_root = None - self.__dir = None - self.__disable_ops = None - self.__exit_ready = False - self.__file_root = None - self.__logpath = "/tmp/depot.log" - self.__mirror = False - self.__output = None - self.__address = None - self.__port = -1 - self.__props = {} - self.__readonly = False - self.__rebuild = False - self.__refresh_index = False - self.__state = self.HALTED - self.__writable_root = None - self.__sort_file_max_size = None - self.__ssl_dialog = None - self.__ssl_cert_file = None - self.__ssl_key_file = None - self.__starttime = 0 - self.__wrapper_start = [] - self.__wrapper_end = wrapper_end - self.__env = {} - self.__nasty = None - self.__nasty_sleep = None - if wrapper_start: - self.__wrapper_start = wrapper_start - if env: - self.__env = env - # - # Enable special unit-test depot mode in which it doesn't - # do its normal double-fork, providing us good control - # over the process. 
- # - self.__env["PKGDEPOT_CONTROLLER"] = "1" - return - - def get_wrapper(self): - return self.__wrapper_start, self.__wrapper_end + def get_repo_url(self): + return urlunparse(("file", "", pathname2url(self.__dir), "", "", "")) - def set_wrapper(self, start, end): - self.__wrapper_start = start - self.__wrapper_end = end + def set_readonly(self): + self.__readonly = True - def unset_wrapper(self): - self.__wrapper_start = [] - self.__wrapper_end = "" + def set_readwrite(self): + self.__readonly = False - def set_depotd_path(self, path): - self.__depot_path = path + def set_mirror(self): + self.__mirror = True - def set_depotd_content_root(self, path): - self.__depot_content_root = path + def unset_mirror(self): + self.__mirror = False - def get_depotd_content_root(self): - return self.__depot_content_root + def set_rebuild(self): + self.__rebuild = True - def set_auto_port(self): - self.__auto_port = True + def set_norebuild(self): + self.__rebuild = False - def set_address(self, address): - self.__address = address + def set_exit_ready(self): + self.__exit_ready = True - def get_address(self): - return self.__address + def set_add_content(self): + self.__add_content = True - def set_port(self, port): - self.__auto_port = False - self.__port = port + def set_logpath(self, logpath): + self.__logpath = logpath - def get_port(self): - return self.__port + def get_logpath(self): + return self.__logpath - def clear_property(self, section, prop): - del self.__props[section][prop] + def set_refresh_index(self): + self.__refresh_index = True - def set_property(self, section, prop, value): - self.__props.setdefault(section, {}) - self.__props[section][prop] = value + def set_norefresh_index(self): + self.__refresh_index = False - def get_property(self, section, prop): - return self.__props.get(section, {}).get(prop) + def get_state(self): + return self.__state - def set_file_root(self, f_root): - self.__file_root = f_root + def set_cfg_file(self, f): + self.__cfg_file = f - def get_file_root(self): - return self.__file_root + def get_cfg_file(self): + return self.__cfg_file - def set_repodir(self, repodir): - self.__dir = repodir + def get_depot_url(self): + if self.__address: + host = self.__address + if ":" in host: + # Special syntax needed for IPv6 addresses. + host = "[{0}]".format(host) + else: + host = "localhost" - def get_repodir(self): - return self.__dir + if self.__ssl_key_file: + scheme = "https" + else: + scheme = "http" - def get_repo(self, auto_create=False): - if auto_create: - try: - sr.repository_create(self.__dir) - except sr.RepositoryExistsError: - # Already exists, nothing to do. 
- pass - return sr.Repository(cfgpathname=self.__cfg_file, - root=self.__dir, writable_root=self.__writable_root) + return "{0}://{1}:{2:d}".format(scheme, host, self.__port) - def get_repo_url(self): - return urlunparse(("file", "", pathname2url( - self.__dir), "", "", "")) + def set_writable_root(self, wr): + self.__writable_root = wr - def set_readonly(self): - self.__readonly = True + def get_writable_root(self): + return self.__writable_root - def set_readwrite(self): - self.__readonly = False + def set_sort_file_max_size(self, sort): + self.__sort_file_max_size = sort - def set_mirror(self): - self.__mirror = True + def get_sort_file_max_size(self): + return self.__sort_file_max_size - def unset_mirror(self): - self.__mirror = False + def set_debug_feature(self, feature): + self.__debug_features[feature] = True - def set_rebuild(self): - self.__rebuild = True + def unset_debug_feature(self, feature): + del self.__debug_features[feature] - def set_norebuild(self): - self.__rebuild = False + def set_disable_ops(self, ops): + self.__disable_ops = ops - def set_exit_ready(self): - self.__exit_ready = True + def unset_disable_ops(self): + self.__disable_ops = None - def set_add_content(self): - self.__add_content = True + def set_nasty(self, nastiness): + """Set the nastiness level of the depot. Also works on + running depots.""" + self.__nasty = nastiness + if self.__depot_handle != None: + nastyurl = urljoin( + self.get_depot_url(), "nasty/{0:d}".format(self.__nasty) + ) + url = urlopen(nastyurl) + url.close() - def set_logpath(self, logpath): - self.__logpath = logpath + def get_nasty(self): + return self.__nasty - def get_logpath(self): - return self.__logpath + def set_nasty_sleep(self, sleep): + self.__nasty_sleep = sleep - def set_refresh_index(self): - self.__refresh_index = True + def get_nasty_sleep(self): + return self.__nasty_sleep - def set_norefresh_index(self): - self.__refresh_index = False + def enable_ssl(self, key_path=None, cert_path=None, dialog=None): + self.__ssl_key_file = key_path + self.__ssl_cert_file = cert_path + self.__ssl_dialog = dialog - def get_state(self): - return self.__state + def disable_ssl(self): + self.__ssl_key_file = None + self.__ssl_cert_file = None + self.__ssl_dialog = None - def set_cfg_file(self, f): - self.__cfg_file = f + def __network_ping(self): + try: + repourl = urljoin(self.get_depot_url(), "versions/0") + # Disable SSL peer verification, we just want to check + # if the depot is running. + url = urlopen(repourl, context=ssl._create_unverified_context()) + url.close() + except HTTPError as e: + # Server returns NOT_MODIFIED if catalog is up + # to date + if e.code == http_client.NOT_MODIFIED: + return True + else: + return False + except URLError as e: + return False + return True + + def is_alive(self): + """First, check that the depot process seems to be alive. + Then make a little HTTP request to see if the depot is + responsive to requests""" + + if self.__depot_handle == None: + return False + + status = self.__depot_handle.poll() + if status != None: + return False + return self.__network_ping() + + @property + def started(self): + """Return a boolean value indicating whether a depot process + has been started using this depotcontroller.""" + + return self.__depot_handle != None + + def get_args(self): + """Return the equivalent command line invocation (as an + array) for the depot as currently configured.""" + + args = [] + + # The depot may fork off children of its own, so we place + # them all together in a process group. 
This allows us to + # nuke everything later on. + args.append("setpgrp") + args.extend(self.__wrapper_start[:]) + args.append(sys.executable) + args.append(self.__depot_path) + if self.__depot_content_root: + args.append("--content-root") + args.append(self.__depot_content_root) + if self.__address: + args.append("-a") + args.append("{0}".format(self.__address)) + if self.__port != -1: + args.append("-p") + args.append("{0:d}".format(self.__port)) + if self.__dir != None: + args.append("-d") + args.append(self.__dir) + if self.__file_root != None: + args.append("--file-root={0}".format(self.__file_root)) + if self.__readonly: + args.append("--readonly") + if self.__rebuild: + args.append("--rebuild") + if self.__mirror: + args.append("--mirror") + if self.__refresh_index: + args.append("--refresh-index") + if self.__add_content: + args.append("--add-content") + if self.__exit_ready: + args.append("--exit-ready") + if self.__cfg_file: + args.append("--cfg-file={0}".format(self.__cfg_file)) + if self.__ssl_cert_file: + args.append("--ssl-cert-file={0}".format(self.__ssl_cert_file)) + if self.__ssl_key_file: + args.append("--ssl-key-file={0}".format(self.__ssl_key_file)) + if self.__ssl_dialog: + args.append("--ssl-dialog={0}".format(self.__ssl_dialog)) + if self.__debug_features: + args.append("--debug={0}".format(",".join(self.__debug_features))) + if self.__disable_ops: + args.append( + "--disable-ops={0}".format(",".join(self.__disable_ops)) + ) + if self.__nasty: + args.append("--nasty {0:d}".format(self.__nasty)) + if self.__nasty_sleep: + args.append("--nasty-sleep {0:d}".format(self.__nasty_sleep)) + for section in self.__props: + for prop, val in six.iteritems(self.__props[section]): + args.append( + "--set-property={0}.{1}='{2}'".format(section, prop, val) + ) + if self.__writable_root: + args.append("--writable-root={0}".format(self.__writable_root)) + + if self.__sort_file_max_size: + args.append( + "--sort-file-max-size={0}".format(self.__sort_file_max_size) + ) + + # Always log access and error information. + args.append("--log-access=stdout") + args.append("--log-errors=stderr") + args.append(self.__wrapper_end) + + return args + + def __initial_start(self): + """'env_arg' can be a dictionary of additional os.environ + entries to use when starting the depot.""" + + if self.__state != self.HALTED: + raise DepotStateException("Depot already starting or " "running") + + # XXX what about stdin and stdout redirection? + args = self.get_args() + + if self.__network_ping(): + raise DepotStateException( + "A depot (or some " + + "other network process) seems to be " + + "running on port {0:d} already!".format(self.__port) + ) + + self.__state = self.STARTING + + # Unbuffer is only allowed in binary mode. + self.__output = open(self.__logpath, "wb", 0) + # Use shlex to re-parse args. + pargs = shlex.split(" ".join(args)) + + newenv = os.environ.copy() + newenv.update(self.__env) + self.__depot_handle = subprocess.Popen( + pargs, + env=newenv, + stdin=subprocess.PIPE, + stdout=self.__output, + stderr=self.__output, + close_fds=True, + ) + if self.__depot_handle == None: + raise DepotStateException("Could not start Depot") + self.__starttime = time.time() + self.__output.close() + + def start(self): + try: + self.__initial_start() - def get_cfg_file(self): - return self.__cfg_file + if self.__refresh_index: + return - def get_depot_url(self): - if self.__address: - host = self.__address - if ":" in host: - # Special syntax needed for IPv6 addresses. 
- host = "[{0}]".format(host) - else: - host = "localhost" + begintime = time.time() + + sleeptime = 0.0 + check_interval = 0.20 + contact = False + while (time.time() - begintime) <= 40.0: + rc = self.__depot_handle.poll() + if rc is not None: + err = "" + with open(self.__logpath, "rb", 0) as errf: + err = errf.read() + raise DepotStateException( + "Depot exited " + "with exit code {0:d} unexpectedly " + "while starting. Output follows:\n" + "{1}\n".format(rc, err) + ) + + if self.is_alive(): + contact = True + break + time.sleep(check_interval) + if contact == False: + self.kill() + self.__state = self.HALTED + raise DepotStateException( + "Depot did not respond to " + "repeated attempts to make contact" + ) - if self.__ssl_key_file: - scheme = "https" - else: - scheme = "http" + self.__state = self.RUNNING + except KeyboardInterrupt: + if self.__depot_handle: + self.kill(now=True) + raise - return "{0}://{1}:{2:d}".format(scheme, host, self.__port) + def start_expected_fail(self, exit=2): + try: + self.__initial_start() + + sleeptime = 0.05 + died = False + rc = None + while sleeptime <= 10.0: + rc = self.__depot_handle.poll() + if rc is not None: + died = True + break + time.sleep(sleeptime) + sleeptime *= 2 + + if died and rc == exit: + self.__state = self.HALTED + return True + else: + self.stop() + return False + except KeyboardInterrupt: + if self.__depot_handle: + self.kill(now=True) + raise - def set_writable_root(self, wr): - self.__writable_root = wr + def refresh(self): + if self.__depot_handle == None: + # XXX might want to remember and return saved + # exit status + return 0 - def get_writable_root(self): - return self.__writable_root + os.kill(self.__depot_handle.pid, signal.SIGUSR1) + return self.__depot_handle.poll() - def set_sort_file_max_size(self, sort): - self.__sort_file_max_size = sort + def kill(self, now=False): + """kill the depot; letting it live for + a little while helps get reliable death""" - def get_sort_file_max_size(self): - return self.__sort_file_max_size - - def set_debug_feature(self, feature): - self.__debug_features[feature] = True - - def unset_debug_feature(self, feature): - del self.__debug_features[feature] - - def set_disable_ops(self, ops): - self.__disable_ops = ops - - def unset_disable_ops(self): - self.__disable_ops = None - - def set_nasty(self, nastiness): - """Set the nastiness level of the depot. Also works on - running depots.""" - self.__nasty = nastiness - if self.__depot_handle != None: - nastyurl = urljoin(self.get_depot_url(), - "nasty/{0:d}".format(self.__nasty)) - url = urlopen(nastyurl) - url.close() + if self.__depot_handle == None: + # XXX might want to remember and return saved + # exit status + return 0 - def get_nasty(self): - return self.__nasty + try: + lifetime = time.time() - self.__starttime + if now == False and lifetime < 1.0: + time.sleep(1.0 - lifetime) + + finally: + # By sticking in this finally: block we ensure that + # even if the kill gets ctrl-c'd, we'll at least take + # a good final whack at the depot by killing -9 its + # process group. 
+ try: + os.kill(-1 * self.__depot_handle.pid, signal.SIGKILL) + except OSError: + pass + self.__state = self.HALTED + self.__depot_handle.wait() + self.__depot_handle = None - def set_nasty_sleep(self, sleep): - self.__nasty_sleep = sleep - - def get_nasty_sleep(self): - return self.__nasty_sleep - - def enable_ssl(self, key_path=None, cert_path=None, dialog=None): - self.__ssl_key_file = key_path - self.__ssl_cert_file = cert_path - self.__ssl_dialog = dialog - - def disable_ssl(self): - self.__ssl_key_file = None - self.__ssl_cert_file = None - self.__ssl_dialog = None - - def __network_ping(self): - try: - repourl = urljoin(self.get_depot_url(), - "versions/0") - # Disable SSL peer verification, we just want to check - # if the depot is running. - url = urlopen(repourl, - context=ssl._create_unverified_context()) - url.close() - except HTTPError as e: - # Server returns NOT_MODIFIED if catalog is up - # to date - if e.code == http_client.NOT_MODIFIED: - return True - else: - return False - except URLError as e: - return False - return True + def stop(self): + if self.__state == self.HALTED: + raise DepotStateException("Depot already stopped") - def is_alive(self): - """ First, check that the depot process seems to be alive. - Then make a little HTTP request to see if the depot is - responsive to requests """ - - if self.__depot_handle == None: - return False - - status = self.__depot_handle.poll() - if status != None: - return False - return self.__network_ping() - - @property - def started(self): - """ Return a boolean value indicating whether a depot process - has been started using this depotcontroller. """ - - return self.__depot_handle != None - - def get_args(self): - """ Return the equivalent command line invocation (as an - array) for the depot as currently configured. """ - - args = [] - - # The depot may fork off children of its own, so we place - # them all together in a process group. This allows us to - # nuke everything later on. 
- args.append("setpgrp") - args.extend(self.__wrapper_start[:]) - args.append(sys.executable) - args.append(self.__depot_path) - if self.__depot_content_root: - args.append("--content-root") - args.append(self.__depot_content_root) - if self.__address: - args.append("-a") - args.append("{0}".format(self.__address)) - if self.__port != -1: - args.append("-p") - args.append("{0:d}".format(self.__port)) - if self.__dir != None: - args.append("-d") - args.append(self.__dir) - if self.__file_root != None: - args.append("--file-root={0}".format(self.__file_root)) - if self.__readonly: - args.append("--readonly") - if self.__rebuild: - args.append("--rebuild") - if self.__mirror: - args.append("--mirror") - if self.__refresh_index: - args.append("--refresh-index") - if self.__add_content: - args.append("--add-content") - if self.__exit_ready: - args.append("--exit-ready") - if self.__cfg_file: - args.append("--cfg-file={0}".format(self.__cfg_file)) - if self.__ssl_cert_file: - args.append("--ssl-cert-file={0}".format(self.__ssl_cert_file)) - if self.__ssl_key_file: - args.append("--ssl-key-file={0}".format(self.__ssl_key_file)) - if self.__ssl_dialog: - args.append("--ssl-dialog={0}".format(self.__ssl_dialog)) - if self.__debug_features: - args.append("--debug={0}".format(",".join( - self.__debug_features))) - if self.__disable_ops: - args.append("--disable-ops={0}".format(",".join( - self.__disable_ops))) - if self.__nasty: - args.append("--nasty {0:d}".format(self.__nasty)) - if self.__nasty_sleep: - args.append("--nasty-sleep {0:d}".format(self.__nasty_sleep)) - for section in self.__props: - for prop, val in six.iteritems(self.__props[section]): - args.append("--set-property={0}.{1}='{2}'".format( - section, prop, val)) - if self.__writable_root: - args.append("--writable-root={0}".format(self.__writable_root)) - - if self.__sort_file_max_size: - args.append("--sort-file-max-size={0}".format(self.__sort_file_max_size)) - - # Always log access and error information. - args.append("--log-access=stdout") - args.append("--log-errors=stderr") - args.append(self.__wrapper_end) - - return args - - def __initial_start(self): - """'env_arg' can be a dictionary of additional os.environ - entries to use when starting the depot.""" - - if self.__state != self.HALTED: - raise DepotStateException("Depot already starting or " - "running") - - # XXX what about stdin and stdout redirection? - args = self.get_args() - - if self.__network_ping(): - raise DepotStateException("A depot (or some " + - "other network process) seems to be " + - "running on port {0:d} already!".format(self.__port)) - - self.__state = self.STARTING - - # Unbuffer is only allowed in binary mode. - self.__output = open(self.__logpath, "wb", 0) - # Use shlex to re-parse args. 
- pargs = shlex.split(" ".join(args)) - - newenv = os.environ.copy() - newenv.update(self.__env) - self.__depot_handle = subprocess.Popen(pargs, env=newenv, - stdin=subprocess.PIPE, - stdout=self.__output, - stderr=self.__output, - close_fds=True) - if self.__depot_handle == None: - raise DepotStateException("Could not start Depot") - self.__starttime = time.time() - self.__output.close() - - def start(self): - - try: - self.__initial_start() - - if self.__refresh_index: - return - - begintime = time.time() - - sleeptime = 0.0 - check_interval = 0.20 - contact = False - while (time.time() - begintime) <= 40.0: - rc = self.__depot_handle.poll() - if rc is not None: - err = "" - with open(self.__logpath, "rb", 0) as \ - errf: - err = errf.read() - raise DepotStateException("Depot exited " - "with exit code {0:d} unexpectedly " - "while starting. Output follows:\n" - "{1}\n".format(rc, err)) - - if self.is_alive(): - contact = True - break - time.sleep(check_interval) - if contact == False: - self.kill() - self.__state = self.HALTED - raise DepotStateException("Depot did not respond to " - "repeated attempts to make contact") - - self.__state = self.RUNNING - except KeyboardInterrupt: - if self.__depot_handle: - self.kill(now=True) - raise - - def start_expected_fail(self, exit=2): - try: - self.__initial_start() - - sleeptime = 0.05 - died = False - rc = None - while sleeptime <= 10.0: - - rc = self.__depot_handle.poll() - if rc is not None: - died = True - break - time.sleep(sleeptime) - sleeptime *= 2 - - if died and rc == exit: - self.__state = self.HALTED - return True - else: - self.stop() - return False - except KeyboardInterrupt: - if self.__depot_handle: - self.kill(now=True) - raise - - def refresh(self): - if self.__depot_handle == None: - # XXX might want to remember and return saved - # exit status - return 0 - - os.kill(self.__depot_handle.pid, signal.SIGUSR1) - return self.__depot_handle.poll() - - def kill(self, now=False): - """kill the depot; letting it live for - a little while helps get reliable death""" - - if self.__depot_handle == None: - # XXX might want to remember and return saved - # exit status - return 0 - - try: - lifetime = time.time() - self.__starttime - if now == False and lifetime < 1.0: - time.sleep(1.0 - lifetime) - - finally: - # By sticking in this finally: block we ensure that - # even if the kill gets ctrl-c'd, we'll at least take - # a good final whack at the depot by killing -9 its - # process group. - try: - os.kill(-1 * self.__depot_handle.pid, - signal.SIGKILL) - except OSError: - pass - self.__state = self.HALTED - self.__depot_handle.wait() - self.__depot_handle = None - - def stop(self): - if self.__state == self.HALTED: - raise DepotStateException("Depot already stopped") - - return self.kill() + return self.kill() def test_func(testdir): - dc = DepotController() - dc.set_port(22222) + dc = DepotController() + dc.set_port(22222) + try: + os.mkdir(testdir) + except OSError: + pass + + dc.set_repodir(testdir) + + for j in range(0, 10): + print( + "{0:>4d}: Starting Depot... ({1})".format( + j, " ".join(dc.get_args()) + ), + end=" ", + ) try: - os.mkdir(testdir) - except OSError: + dc.start() + print(" Done. ", end=" ") + print("... Ping ", end=" ") + sys.stdout.flush() + time.sleep(0.2) + while dc.is_alive() == False: pass + print("... Done. 
", end=" ") + + print("Stopping Depot...", end=" ") + status = dc.stop() + if status is None: + print(" Result: Exited {0}".format(status), end=" ") + elif status == 0: + print(" Done.", end=" ") + elif status < 0: + print(" Result: Signal {0:d}".format(-1 * status), end=" ") + else: + print(" Result: Exited {0:d}".format(status), end=" ") + print() + f = open("/tmp/depot.log", "r") + print(f.read()) + f.close() + except KeyboardInterrupt: + print("\nKeyboard Interrupt: Cleaning up Depots...") + dc.stop() + raise - dc.set_repodir(testdir) - - for j in range(0, 10): - print("{0:>4d}: Starting Depot... ({1})".format( - j, " ".join(dc.get_args())), end=" ") - try: - dc.start() - print(" Done. ", end=" ") - print("... Ping ", end=" ") - sys.stdout.flush() - time.sleep(0.2) - while dc.is_alive() == False: - pass - print("... Done. ", end=" ") - - print("Stopping Depot...", end=" ") - status = dc.stop() - if status is None: - print(" Result: Exited {0}".format(status), end=" ") - elif status == 0: - print(" Done.", end=" ") - elif status < 0: - print(" Result: Signal {0:d}".format(-1 * status), end=" ") - else: - print(" Result: Exited {0:d}".format(status), end=" ") - print() - f = open("/tmp/depot.log", "r") - print(f.read()) - f.close() - except KeyboardInterrupt: - print("\nKeyboard Interrupt: Cleaning up Depots...") - dc.stop() - raise if __name__ == "__main__": - __testdir = "/tmp/depotcontrollertest.{0:d}".format(os.getpid()) - try: - test_func(__testdir) - except KeyboardInterrupt: - pass - os.system("rm -fr {0}".format(__testdir)) - print("\nDone") + __testdir = "/tmp/depotcontrollertest.{0:d}".format(os.getpid()) + try: + test_func(__testdir) + except KeyboardInterrupt: + pass + os.system("rm -fr {0}".format(__testdir)) + print("\nDone") # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/digest.py b/src/modules/digest.py index 74942062e..0ee30847d 100644 --- a/src/modules/digest.py +++ b/src/modules/digest.py @@ -28,10 +28,11 @@ import six try: - import pkg.sha512_t - sha512_supported = True + import pkg.sha512_t + + sha512_supported = True except ImportError: - sha512_supported = False + sha512_supported = False # When running the test suite, we alter our behaviour depending on certain # debug flags. 
@@ -86,15 +87,15 @@ RANKED_HASHES = [] if DebugValues["hash"]: - _hashes = reversed(DebugValues["hash"].split("+")) + _hashes = reversed(DebugValues["hash"].split("+")) else: - _hashes = ("sha512t_256", "sha256", "sha1") + _hashes = ("sha512t_256", "sha256", "sha1") for alg in _hashes: - if alg == "sha512t_256": - if not sha512_supported: - continue - RANKED_HASHES.append(alg) + if alg == "sha512t_256": + if not sha512_supported: + continue + RANKED_HASHES.append(alg) PREFERRED_HASH = RANKED_HASHES[0] REVERSE_RANKED_HASHES = RANKED_HASHES[::-1] @@ -106,18 +107,20 @@ DEFAULT_CHAIN_CHASH_ATTRS = [] if "sha1" in RANKED_HASHES: - DEFAULT_HASH_ATTRS.append("hash") - DEFAULT_CHASH_ATTRS.append("chash") - DEFAULT_GELF_HASH_ATTRS.append("elfhash") - DEFAULT_CHAIN_ATTRS.append("chain") - DEFAULT_CHAIN_CHASH_ATTRS.append("chain.chashes") + DEFAULT_HASH_ATTRS.append("hash") + DEFAULT_CHASH_ATTRS.append("chash") + DEFAULT_GELF_HASH_ATTRS.append("elfhash") + DEFAULT_CHAIN_ATTRS.append("chain") + DEFAULT_CHAIN_CHASH_ATTRS.append("chain.chashes") if PREFERRED_HASH != "sha1": - DEFAULT_HASH_ATTRS.append("pkg.content-hash") - DEFAULT_CHASH_ATTRS.append("pkg.content-hash") - DEFAULT_GELF_HASH_ATTRS.append("pkg.content-hash") - DEFAULT_CHAIN_ATTRS.append("pkg.chain.{0}".format(PREFERRED_HASH)) - DEFAULT_CHAIN_CHASH_ATTRS.append("pkg.chain.chashes.{0}".format(PREFERRED_HASH)) + DEFAULT_HASH_ATTRS.append("pkg.content-hash") + DEFAULT_CHASH_ATTRS.append("pkg.content-hash") + DEFAULT_GELF_HASH_ATTRS.append("pkg.content-hash") + DEFAULT_CHAIN_ATTRS.append("pkg.chain.{0}".format(PREFERRED_HASH)) + DEFAULT_CHAIN_CHASH_ATTRS.append( + "pkg.chain.chashes.{0}".format(PREFERRED_HASH) + ) UNSIGNED_GELF_HASH_MAP = { "gelf:" + PREFERRED_HASH: "gelf.unsigned:" + PREFERRED_HASH @@ -148,306 +151,321 @@ # Dictionaries of the pkg(7) hash and content-hash attributes we know about. if DebugValues["hash"] == "sha1": - # Simulate older non-SHA2 aware pkg(7) code - HASH_ALGS = {"hash": hashlib.sha1} - GELF_HASH_ALGS = {"elfhash": hashlib.sha1} + # Simulate older non-SHA2 aware pkg(7) code + HASH_ALGS = {"hash": hashlib.sha1} + GELF_HASH_ALGS = {"elfhash": hashlib.sha1} else: - HASH_ALGS = { - "hash": hashlib.sha1, - "pkg.hash.sha256": hashlib.sha256, - "file:sha256": hashlib.sha256, - "gzip:sha256": hashlib.sha256, - } - - GELF_HASH_ALGS = { - "elfhash": hashlib.sha1, - "gelf:sha256": hashlib.sha256, - "file:sha256": hashlib.sha256 - } - - if sha512_supported: - HASH_ALGS["pkg.hash.sha512t_256"] = pkg.sha512_t.SHA512_t - HASH_ALGS["file:sha512t_256"] = pkg.sha512_t.SHA512_t - HASH_ALGS["gzip:sha512t_256"] = pkg.sha512_t.SHA512_t - GELF_HASH_ALGS["gelf:sha512t_256"] = pkg.sha512_t.SHA512_t - GELF_HASH_ALGS["file:sha512t_256"] = pkg.sha512_t.SHA512_t + HASH_ALGS = { + "hash": hashlib.sha1, + "pkg.hash.sha256": hashlib.sha256, + "file:sha256": hashlib.sha256, + "gzip:sha256": hashlib.sha256, + } + + GELF_HASH_ALGS = { + "elfhash": hashlib.sha1, + "gelf:sha256": hashlib.sha256, + "file:sha256": hashlib.sha256, + } + + if sha512_supported: + HASH_ALGS["pkg.hash.sha512t_256"] = pkg.sha512_t.SHA512_t + HASH_ALGS["file:sha512t_256"] = pkg.sha512_t.SHA512_t + HASH_ALGS["gzip:sha512t_256"] = pkg.sha512_t.SHA512_t + GELF_HASH_ALGS["gelf:sha512t_256"] = pkg.sha512_t.SHA512_t + GELF_HASH_ALGS["file:sha512t_256"] = pkg.sha512_t.SHA512_t # A dictionary of the compressed hash attributes we know about. 
CHASH_ALGS = {} for key in HASH_ALGS: - CHASH_ALGS[key.replace("hash", "chash")] = HASH_ALGS[key] + CHASH_ALGS[key.replace("hash", "chash")] = HASH_ALGS[key] # A dictionary of signature action chain hash attributes we know about. CHAIN_ALGS = {} for key in HASH_ALGS: - CHAIN_ALGS[key.replace("hash", "chain")] = HASH_ALGS[key] + CHAIN_ALGS[key.replace("hash", "chain")] = HASH_ALGS[key] # A dictionary of signature action chain chash attributes we know about. CHAIN_CHASH_ALGS = {} for key in HASH_ALGS: - CHAIN_CHASH_ALGS[key.replace("hash", "chain.chashes")] = HASH_ALGS[key] + CHAIN_CHASH_ALGS[key.replace("hash", "chain.chashes")] = HASH_ALGS[key] + +ALL_HASH_ATTRS = ( + DEFAULT_HASH_ATTRS + DEFAULT_CHASH_ATTRS + DEFAULT_GELF_HASH_ATTRS +) -ALL_HASH_ATTRS = (DEFAULT_HASH_ATTRS + DEFAULT_CHASH_ATTRS + - DEFAULT_GELF_HASH_ATTRS) def is_hash_attr(attr_name): - """Tells whether or not the named attribute contains a hash value.""" + """Tells whether or not the named attribute contains a hash value.""" + + return attr_name in ALL_HASH_ATTRS - return attr_name in ALL_HASH_ATTRS def _get_hash_dics(hash_type): - """Based on the 'hash_type', return a tuple describing the ranking of - hash attributes from "most preferred" to "least preferred" and a mapping - of those attributes to the hash algorithms that are used to - compute those attributes. - - If 'reverse' is true, return the rank_tuple in reverse order, from least - preferred hash to most preferred hash. - """ - - if hash_type == HASH: - hash_attrs = DEFAULT_HASH_ATTRS - hash_dic = HASH_ALGS - elif hash_type == CHASH: - hash_attrs = DEFAULT_CHASH_ATTRS - hash_dic = CHASH_ALGS - elif hash_type == HASH_GELF: - hash_attrs = DEFAULT_GELF_HASH_ATTRS - hash_dic = GELF_HASH_ALGS - elif hash_type == CHAIN: - hash_attrs = DEFAULT_CHAIN_ATTRS - hash_dic = CHAIN_ALGS - elif hash_type == CHAIN_CHASH: - hash_attrs = DEFAULT_CHAIN_CHASH_ATTRS - hash_dic = CHAIN_CHASH_ALGS - else: - hash_attrs = None - hash_dic = None - - return hash_attrs, hash_dic + """Based on the 'hash_type', return a tuple describing the ranking of + hash attributes from "most preferred" to "least preferred" and a mapping + of those attributes to the hash algorithms that are used to + compute those attributes. + + If 'reverse' is true, return the rank_tuple in reverse order, from least + preferred hash to most preferred hash. 
+ """ + + if hash_type == HASH: + hash_attrs = DEFAULT_HASH_ATTRS + hash_dic = HASH_ALGS + elif hash_type == CHASH: + hash_attrs = DEFAULT_CHASH_ATTRS + hash_dic = CHASH_ALGS + elif hash_type == HASH_GELF: + hash_attrs = DEFAULT_GELF_HASH_ATTRS + hash_dic = GELF_HASH_ALGS + elif hash_type == CHAIN: + hash_attrs = DEFAULT_CHAIN_ATTRS + hash_dic = CHAIN_ALGS + elif hash_type == CHAIN_CHASH: + hash_attrs = DEFAULT_CHAIN_CHASH_ATTRS + hash_dic = CHAIN_CHASH_ALGS + else: + hash_attrs = None + hash_dic = None + + return hash_attrs, hash_dic class ContentHash(dict): - """This class breaks out the stringified tuples from - pkg.content-hash + """This class breaks out the stringified tuples from + pkg.content-hash - "extract_method:hash_alg:hash_val" + "extract_method:hash_alg:hash_val" - into a dict with entries + into a dict with entries - "extract_method:hash_alg": "hash_val" - """ - def __init__(self, vals): - dict.__init__(self) + "extract_method:hash_alg": "hash_val" + """ - if isinstance(vals, six.string_types): - vals = (vals,) + def __init__(self, vals): + dict.__init__(self) - for v in vals: - vs = v.rsplit(":", 1) - self[vs[0]] = vs[1] + if isinstance(vals, six.string_types): + vals = (vals,) + + for v in vals: + vs = v.rsplit(":", 1) + self[vs[0]] = vs[1] def get_preferred_hash(action, hash_type=HASH, reversed=False): - """Returns a tuple of the form (hash_attr, hash_val, hash_func) - where 'hash_attr' is the preferred hash attribute name, 'hash_val' - is the preferred hash value, and 'hash_func' is the function - used to compute the preferred hash based on the available - pkg.content-hash or pkg.*hash.* attributes declared in the action.""" - - hash_attrs, hash_dic = _get_hash_dics(hash_type) - if not (hash_attrs and hash_dic): - raise ValueError("Unknown hash_type {0} passed to " - "get_preferred_hash".format(hash_type)) - - if hash_type == HASH_GELF: - extract_method = EXTRACT_GELF - elif hash_type == CHASH: - extract_method = EXTRACT_GZIP - else: - extract_method = EXTRACT_FILE - if reversed: - ranked_hashes = REVERSE_RANKED_HASHES - else: - ranked_hashes = RANKED_HASHES - - for alg in ranked_hashes: - if alg == "sha1": - # The corresponding hash attr should be in the - # first position if "sha1" is enabled. - attr = hash_attrs[0] - if not action: - return attr, None, hash_dic[attr] - if hash_type == HASH: - if action.hash: - return attr, action.hash, hash_dic[attr] - else: - if attr in action.attrs: - return (attr, action.attrs[attr], - hash_dic[attr]) - elif hash_type in (HASH, HASH_GELF, CHASH): - # Currently only HASH, HASH_GELF and CHASH support - # pkg.content-hash. - ch_type = "{0}:{1}".format(extract_method, alg) - attr = "pkg.content-hash" - if not action: - return attr, None, hash_dic[ch_type] - if attr in action.attrs: - ch = ContentHash(action.attrs[attr]) - if ch_type in ch: - return (attr, ch[ch_type], - hash_dic[ch_type]) - elif hash_type in (CHAIN, CHAIN_CHASH): - # The corresponding hash attr should be in the - # last position if sha2 or higher algorithm is enabled. 
- attr = hash_attrs[-1] - if attr in action.attrs: - return attr, action.attrs[attr], hash_dic[attr] - - # fallback to the default hash member since it's not in action.attrs - if hash_type == HASH: - return None, action.hash, hashlib.sha1 - # an action can legitimately have no chash - if hash_type == CHASH: - return None, None, hashlib.sha1 - # an action can legitimately have no GELF content-hash if it's not a - # file type we know about - if hash_type == HASH_GELF: - return None, None, None - # an action can legitimately have no chain - if hash_type == CHAIN: - return None, None, None - # an action can legitimately have no chain_chash - if hash_type == CHAIN_CHASH: - return None, None, None - - # This should never happen. - if reversed: - raise Exception("Error determining the least preferred hash " - "for {0} {1}".format(action, hash_type)) - else: - raise Exception("Error determining the preferred hash for " - "{0} {1}".format(action, hash_type)) + """Returns a tuple of the form (hash_attr, hash_val, hash_func) + where 'hash_attr' is the preferred hash attribute name, 'hash_val' + is the preferred hash value, and 'hash_func' is the function + used to compute the preferred hash based on the available + pkg.content-hash or pkg.*hash.* attributes declared in the action.""" + + hash_attrs, hash_dic = _get_hash_dics(hash_type) + if not (hash_attrs and hash_dic): + raise ValueError( + "Unknown hash_type {0} passed to " + "get_preferred_hash".format(hash_type) + ) + + if hash_type == HASH_GELF: + extract_method = EXTRACT_GELF + elif hash_type == CHASH: + extract_method = EXTRACT_GZIP + else: + extract_method = EXTRACT_FILE + if reversed: + ranked_hashes = REVERSE_RANKED_HASHES + else: + ranked_hashes = RANKED_HASHES + + for alg in ranked_hashes: + if alg == "sha1": + # The corresponding hash attr should be in the + # first position if "sha1" is enabled. + attr = hash_attrs[0] + if not action: + return attr, None, hash_dic[attr] + if hash_type == HASH: + if action.hash: + return attr, action.hash, hash_dic[attr] + else: + if attr in action.attrs: + return (attr, action.attrs[attr], hash_dic[attr]) + elif hash_type in (HASH, HASH_GELF, CHASH): + # Currently only HASH, HASH_GELF and CHASH support + # pkg.content-hash. + ch_type = "{0}:{1}".format(extract_method, alg) + attr = "pkg.content-hash" + if not action: + return attr, None, hash_dic[ch_type] + if attr in action.attrs: + ch = ContentHash(action.attrs[attr]) + if ch_type in ch: + return (attr, ch[ch_type], hash_dic[ch_type]) + elif hash_type in (CHAIN, CHAIN_CHASH): + # The corresponding hash attr should be in the + # last position if sha2 or higher algorithm is enabled. + attr = hash_attrs[-1] + if attr in action.attrs: + return attr, action.attrs[attr], hash_dic[attr] + + # fallback to the default hash member since it's not in action.attrs + if hash_type == HASH: + return None, action.hash, hashlib.sha1 + # an action can legitimately have no chash + if hash_type == CHASH: + return None, None, hashlib.sha1 + # an action can legitimately have no GELF content-hash if it's not a + # file type we know about + if hash_type == HASH_GELF: + return None, None, None + # an action can legitimately have no chain + if hash_type == CHAIN: + return None, None, None + # an action can legitimately have no chain_chash + if hash_type == CHAIN_CHASH: + return None, None, None + + # This should never happen. 
+ if reversed: + raise Exception( + "Error determining the least preferred hash " + "for {0} {1}".format(action, hash_type) + ) + else: + raise Exception( + "Error determining the preferred hash for " + "{0} {1}".format(action, hash_type) + ) def get_least_preferred_hash(action, hash_type=HASH): - """Returns a tuple of the least preferred hash attribute name, the hash - value that should result when we compute the hash, and the function used - to compute the hash based on the available hash and pkg.*hash.* - attributes declared in the action.""" - - return get_preferred_hash(action, hash_type=hash_type, reversed=True) - - -def get_common_preferred_hash(action, old_action, hash_type=HASH, - cmp_policy=None): - """Returns the most preferred hash attribute of those present - on a new action and/or an installed (old) version of that - action. We return the name of the hash attribute, the new and - original values of that attribute, and the function used - to compute the hash. - - The pkg.content-hash attribute may be multi-valued. When - selecting this attribute, a secondary selection will be made - based on a ranked list of value prefixes. The most preferred - value will then be returned. - - Normally, payload comparisons should only be made based on - hashes that include signatures in the extracted data. This - constraint can be relaxed by setting cmp_policy=CMP_UNSIGNED. In - this case, the most preferred hash will be selected first, and - then we'll check for unsigned versions of that hash on both - actions. When both actions have that unsigned hash, its values - will be returned in place of the signed values. - - If no common attribute is found, we fallback to the legacy - .hash member assuming it is not None for the new and - orig actions, and specify hashlib.sha1 as the algorithm. If no - 'hash' member is set, we return a tuple of None objects. - - """ - - if not old_action: - return None, None, None, None - - hash_attrs, hash_dic = _get_hash_dics(hash_type) - if hash_type == HASH_GELF: - extract_method = EXTRACT_GELF - elif hash_type == CHASH: - extract_method = EXTRACT_GZIP - else: - extract_method = EXTRACT_FILE - - if not (hash_attrs and hash_dic): - raise ValueError("Unknown hash_type {0} passed to " - "get_preferred_common_hash".format(hash_type)) - - new_hashes = frozenset(a for a in action.attrs if a in hash_attrs) - old_hashes = frozenset(a for a in old_action.attrs if a in hash_attrs) - - all_hashes = new_hashes | old_hashes - - for alg in RANKED_HASHES: - if alg == "sha1": - attr = hash_attrs[0] - # The corresponding hash attr should be in the - # first position if "sha1" is enabled. - if attr not in all_hashes: - continue - new_hash = action.attrs.get(attr) - old_hash = old_action.attrs.get(attr) - return attr, new_hash, old_hash, hash_dic[attr] - elif hash_type in (HASH, HASH_GELF, CHASH): - # Currently only HASH, HASH_GELF and CHASH support - # pkg.content-hash. - attr = "pkg.content-hash" - if attr not in all_hashes: - continue - nh = ContentHash( - action.attrs.get(attr, {})) - oh = ContentHash( - old_action.attrs.get(attr, {})) - new_types = frozenset(nh) - old_types = frozenset(oh) - all_types = new_types | old_types - - ch_type = "{0}:{1}".format(extract_method, alg) - if ch_type not in all_types: - continue - - new_hash = nh.get(ch_type) - old_hash = oh.get(ch_type) - - # Here we've matched a ranked hash type in at - # least one of the pkg.content-hash value - # sets, so we know we'll be returning. 
If - # we're allowing comparison on unsigned hash - # values, and both value sets have this hash - # type, and both value sets have a - # corresponding unsigned hash, swap in those - # unsigned hash values. - from pkg.misc import CMP_UNSIGNED - if (cmp_policy == CMP_UNSIGNED and new_hash and - old_hash and ch_type in UNSIGNED_GELF_HASH_MAP): - ut = UNSIGNED_GELF_HASH_MAP[ch_type] - if ut in nh and ut in oh: - new_hash = nh[ut] - old_hash = oh[ut] - - return attr, new_hash, old_hash, hash_dic.get(ch_type) - elif hash_type in (CHAIN, CHAIN_CHASH): - # The corresponding hash attr should be in the - # last position if sha2 or higher algorithm is enabled. - attr = hash_attrs[-1] - if attr not in all_hashes: - continue - new_hash = action.attrs.get(attr) - old_hash = old_action.attrs.get(attr) - return attr, new_hash, old_hash, hash_dic[attr] - - if action.hash and old_action.hash: - return None, action.hash, old_action.hash, hashlib.sha1 + """Returns a tuple of the least preferred hash attribute name, the hash + value that should result when we compute the hash, and the function used + to compute the hash based on the available hash and pkg.*hash.* + attributes declared in the action.""" + + return get_preferred_hash(action, hash_type=hash_type, reversed=True) + + +def get_common_preferred_hash( + action, old_action, hash_type=HASH, cmp_policy=None +): + """Returns the most preferred hash attribute of those present + on a new action and/or an installed (old) version of that + action. We return the name of the hash attribute, the new and + original values of that attribute, and the function used + to compute the hash. + + The pkg.content-hash attribute may be multi-valued. When + selecting this attribute, a secondary selection will be made + based on a ranked list of value prefixes. The most preferred + value will then be returned. + + Normally, payload comparisons should only be made based on + hashes that include signatures in the extracted data. This + constraint can be relaxed by setting cmp_policy=CMP_UNSIGNED. In + this case, the most preferred hash will be selected first, and + then we'll check for unsigned versions of that hash on both + actions. When both actions have that unsigned hash, its values + will be returned in place of the signed values. + + If no common attribute is found, we fallback to the legacy + .hash member assuming it is not None for the new and + orig actions, and specify hashlib.sha1 as the algorithm. If no + 'hash' member is set, we return a tuple of None objects. + + """ + + if not old_action: return None, None, None, None + hash_attrs, hash_dic = _get_hash_dics(hash_type) + if hash_type == HASH_GELF: + extract_method = EXTRACT_GELF + elif hash_type == CHASH: + extract_method = EXTRACT_GZIP + else: + extract_method = EXTRACT_FILE + + if not (hash_attrs and hash_dic): + raise ValueError( + "Unknown hash_type {0} passed to " + "get_preferred_common_hash".format(hash_type) + ) + + new_hashes = frozenset(a for a in action.attrs if a in hash_attrs) + old_hashes = frozenset(a for a in old_action.attrs if a in hash_attrs) + + all_hashes = new_hashes | old_hashes + + for alg in RANKED_HASHES: + if alg == "sha1": + attr = hash_attrs[0] + # The corresponding hash attr should be in the + # first position if "sha1" is enabled. 
+ if attr not in all_hashes: + continue + new_hash = action.attrs.get(attr) + old_hash = old_action.attrs.get(attr) + return attr, new_hash, old_hash, hash_dic[attr] + elif hash_type in (HASH, HASH_GELF, CHASH): + # Currently only HASH, HASH_GELF and CHASH support + # pkg.content-hash. + attr = "pkg.content-hash" + if attr not in all_hashes: + continue + nh = ContentHash(action.attrs.get(attr, {})) + oh = ContentHash(old_action.attrs.get(attr, {})) + new_types = frozenset(nh) + old_types = frozenset(oh) + all_types = new_types | old_types + + ch_type = "{0}:{1}".format(extract_method, alg) + if ch_type not in all_types: + continue + + new_hash = nh.get(ch_type) + old_hash = oh.get(ch_type) + + # Here we've matched a ranked hash type in at + # least one of the pkg.content-hash value + # sets, so we know we'll be returning. If + # we're allowing comparison on unsigned hash + # values, and both value sets have this hash + # type, and both value sets have a + # corresponding unsigned hash, swap in those + # unsigned hash values. + from pkg.misc import CMP_UNSIGNED + + if ( + cmp_policy == CMP_UNSIGNED + and new_hash + and old_hash + and ch_type in UNSIGNED_GELF_HASH_MAP + ): + ut = UNSIGNED_GELF_HASH_MAP[ch_type] + if ut in nh and ut in oh: + new_hash = nh[ut] + old_hash = oh[ut] + + return attr, new_hash, old_hash, hash_dic.get(ch_type) + elif hash_type in (CHAIN, CHAIN_CHASH): + # The corresponding hash attr should be in the + # last position if sha2 or higher algorithm is enabled. + attr = hash_attrs[-1] + if attr not in all_hashes: + continue + new_hash = action.attrs.get(attr) + old_hash = old_action.attrs.get(attr) + return attr, new_hash, old_hash, hash_dic[attr] + + if action.hash and old_action.hash: + return None, action.hash, old_action.hash, hashlib.sha1 + return None, None, None, None + + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/facet.py b/src/modules/facet.py index 0e66436b7..43bce8c17 100644 --- a/src/modules/facet.py +++ b/src/modules/facet.py @@ -37,501 +37,501 @@ from pkg._varcet import _allow_facet from pkg.misc import EmptyI, ImmutableDict + class Facets(dict): - # store information on facets; subclass dict - # and maintain ordered list of keys sorted - # by length. - - # subclass __getitem__ so that queries w/ - # actual facets find match - - # - # For image planning purposes and to be able to compare facet objects - # deterministically, facets must be sorted. They are first sorted by - # source (more details below), then by length, then lexically. - # - # Facets can come from three different sources. - # - # SYSTEM facets are facets whose values are assigned by the system. - # These are usually facets defined in packages which are not set in an - # image, and the value assigned by the system is always true. These - # facets will usually never be found in a Facets dictionary. (Facets - # dictionaries only contain facets which are explicitly set.) - # - # LOCAL facets are facets which have been set locally in an image - # using pkg(1) or the pkg api. Explicitly set LOCAL facets are stored - # in Facets.__local. Facets which are not explicitly set but match an - # explicitly set LOCAL facet glob pattern are also considered to be - # LOCAL. - # - # PARENT facets are facets which are inherited from a parent image. - # they are managed internally by the packaging subsystem. Explicitly - # inherited facets are stored in Facets.__inherited. 
Facets which are - # not explicitly set but match an explicitly set PARENT facet glob - # pattern are also considered to be PARENT. - # - # When evaluating facets, all PARENT facets are evaluated before LOCAL - # facets. This is done by ensuring that all PARENT facets come before - # any LOCAL facets in __keylist. This is done because PARENT facets - # exist to propagate faceted dependencies between linked images, which - # is needed to ensure the solver can run successfully. ie, if a - # parent image relaxes dependencies via facet version-locks, then the - # child needs to inherit those facets since otherwise it is more - # constrained in possible solutions than it's parent and likely won't - # be able to plan an update that keeps it in sync with it's parent. - # - # Sine PARENT facets take priority over LOCAL facets, it's possible to - # have conflicts between the two. In the case where a facet is both - # inherited and set locally, both values are preserved, but the - # inherited value masks the local value. Users can list and update - # local values while they are masked using pkg(1), but as long as the - # values are masked they will not affect image planning operations. - # Once an inherited facet that masks a local facet is removed, the - # local facet will be restored. - # - - FACET_SRC_SYSTEM = "system" - FACET_SRC_LOCAL = "local" - FACET_SRC_PARENT = "parent" - - def __init__(self, init=EmptyI): - dict.__init__(self) - self.__keylist = [] - self.__res = {} - self.__local = {} - self.__local_ro = None - self.__inherited = {} - self.__inherited_ro = None - - # initialize ourselves - self.update(init) - - @staticmethod - def getstate(obj, je_state=None): - """Returns the serialized state of this object in a format - that that can be easily stored using JSON, pickle, etc.""" - - return [ - [misc.force_text(k), v, True] - for k, v in six.iteritems(obj.__inherited) - ] + [ - [misc.force_text(k), v, False] - for k, v in six.iteritems(obj.__local) - ] - - @staticmethod - def fromstate(state, jd_state=None): - """Update the state of this object using previously serialized - state obtained via getstate().""" - - rv = Facets() - for k, v, inhertited in state: - if not inhertited: - rv[k] = v - else: - rv._set_inherited(k, v) - return rv - - def _cmp_priority(self, other): - """Compare the facet match priority of two Facets objects. - Since the match priority of a Facets object is dependent upon - facet sources (local vs parent) and names, we're essentially - ensuring that both objects have the same set of facet sources - and names.""" - - assert type(other) is Facets - return misc.cmp(self.__keylist, other.__keylist) - - def _cmp_values(self, other): - """Compare the facet values of two Facets objects. This - comparison ignores any masked values.""" - - assert type(other) is Facets - return misc.cmp(self, other) - - def _cmp_all_values(self, other): - """Compare all the facet values of two Facets objects. This - comparison takes masked values into account.""" - - assert type(other) is Facets - rv = misc.cmp(self.__inherited, other.__inherited) - if rv == 0: - rv = misc.cmp(self.__local, other.__local) - return rv - - # this __cmp__ is used as a helper function for the rich comparison - # methods. - # __cmp__ defined; pylint: disable=W1630 - def __cmp__(self, other): - """Compare two Facets objects. This comparison takes masked - values into account.""" - - # check if we're getting compared against something other than - # another Factes object. 
- if type(other) is not Facets: - return 1 - - # Check for effective facet value changes that could affect - # solver computations. - rv = self._cmp_values(other) - if rv != 0: - return rv - - # Check for facet priority changes that could affect solver - # computations. (Priority changes can occur when local or - # inherited facets are added or removed.) - rv = self._cmp_priority(other) - if rv != 0: - return rv - - # There are no outwardly visible facet priority or value - # changes that could affect solver computations, but it's - # still possible that we're changing the set of local or - # inherited facets in a way that doesn't affect solver - # computations. For example: we could be adding a local - # facet with a value that is masked by an inherited facet, or - # having a facet transition from being inherited to being - # local without a priority or value change. Check if this is - # the case. - rv = self._cmp_all_values(other) - return rv - - def __hash__(self): - return hash(str(self)) - - def __eq__(self, other): - """redefine in terms of __cmp__()""" - return (Facets.__cmp__(self, other) == 0) - - def __ne__(self, other): - """redefine in terms of __cmp__()""" - return (Facets.__cmp__(self, other) != 0) - - def __ge__(self, other): - """redefine in terms of __cmp__()""" - return (Facets.__cmp__(self, other) >= 0) - - def __gt__(self, other): - """redefine in terms of __cmp__()""" - return (Facets.__cmp__(self, other) > 0) - - def __le__(self, other): - """redefine in terms of __cmp__()""" - return (Facets.__cmp__(self, other) <= 0) - - def __lt__(self, other): - """redefine in terms of __cmp__()""" - return (Facets.__cmp__(self, other) < 0) - - def __repr__(self): - s = "<" - s += ", ".join([ - "{0}:{1}".format(k, dict.__getitem__(self, k)) - for k in self.__keylist - ]) - s += ">" - - return s - - def __keylist_sort(self): - """Update __keysort, which is used to determine facet matching - order. Inherited facets always take priority over local - facets so make sure all inherited facets come before local - facets in __keylist. All facets from a given source are - sorted by length, and facets of equal length are sorted - lexically.""" - - def facet_sort(x, y): - i = len(y) - len(x) - if i != 0: - return i - return misc.cmp(x, y) - - self.__keylist = [] - self.__keylist += sorted([ - i - for i in self - if i in self.__inherited - ], key=cmp_to_key(facet_sort)) - self.__keylist += sorted([ - i - for i in self - if i not in self.__inherited - ], key=cmp_to_key(facet_sort)) - - def __setitem_internal(self, item, value, inherited=False): - if not item.startswith("facet."): - raise KeyError('key must start with "facet".') - - if not (value == True or value == False): - raise ValueError("value must be boolean") - - keylist_sort = False - if (inherited and item not in self.__inherited) or \ - (not inherited and item not in self): - keylist_sort = True - - # save the facet in the local or inherited dictionary - # clear the corresponding read-only dictionary - if inherited: - self.__inherited[item] = value - self.__inherited_ro = None - else: - self.__local[item] = value - self.__local_ro = None - - # Inherited facets always take priority over local facets. 
- if inherited or item not in self.__inherited: - dict.__setitem__(self, item, value) - self.__res[item] = re.compile(fnmatch.translate(item)) - - if keylist_sort: - self.__keylist_sort() - - def __setitem__(self, item, value): - """__setitem__ only operates on local facets.""" - self.__setitem_internal(item, value) - - def __getitem_internal(self, item): - """Implement facet lookup algorithm here - - Note that _allow_facet bypasses __getitem__ for performance - reasons; if __getitem__ changes, _allow_facet in _varcet.c - must also be updated. - - We return a tuple of the form (, ) where key is - the explicitly set facet name (which may be a glob pattern) - that matched the caller specific facet name.""" - - if not item.startswith("facet."): - raise KeyError("key must start w/ facet.") - - if item in self: - return item, dict.__getitem__(self, item) - for k in self.__keylist: - if self.__res[k].match(item): - return k, dict.__getitem__(self, k) - - # The trailing '.' is to encourage namespace usage. - if item.startswith("facet.debug.") or \ - item.startswith("facet.optional."): - return None, False # exclude by default - return None, True # be inclusive - - def __getitem__(self, item): - return self.__getitem_internal(item)[1] - - def __delitem_internal(self, item, inherited=False): - - # check for an attempt to delete an invalid facet - if not dict.__contains__(self, item): - raise KeyError(item) - - # check for an attempt to delete an invalid local facet - if not inherited and item not in self.__local: - raise KeyError(item) - - # we should never try to delete an invalid inherited facet - assert not inherited or item in self.inherited - - keylist_sort = False - if inherited and item in self.__local: - # the inherited value was overriding a local value - # that should now be exposed - dict.__setitem__(self, item, self.__local[item]) - self.__res[item] = re.compile(fnmatch.translate(item)) - keylist_sort = True - else: - # delete the item - dict.__delitem__(self, item) - del self.__res[item] - self.__keylist.remove(item) - - # delete item from the local or inherited dictionary - # clear the corresponding read-only dictionary - if inherited: - rv = self.__inherited[item] - del self.__inherited[item] - self.__inherited_ro = None - else: - rv = self.__local[item] - del self.__local[item] - self.__local_ro = None - - if keylist_sort: - self.__keylist_sort() - return rv - - def __delitem__(self, item): - """__delitem__ only operates on local facets.""" - self.__delitem_internal(item) - - # allow_action is provided as a native function (see end of class - # declaration). - - def _set_inherited(self, item, value): - """Set an inherited facet.""" - self.__setitem_internal(item, value, inherited=True) - - def _clear_inherited(self): - """Clear all inherited facet.""" - for k in list(self.__inherited.keys()): - self.__delitem_internal(k, inherited=True) - - def _action_match(self, act): - """Find the subset of facet key/values pairs which match any - facets present on an action.""" - - # find all the facets present in the current action - action_facets = frozenset([ - a - for a in act.attrs - if a.startswith("facet.") - ]) - - rv = set() - for facet in self.__keylist: - if facet in action_facets: - # we found a matching facet. - rv.add((facet, self[facet])) - continue - for action_facet in action_facets: - if self.__res[facet].match(action_facet): - # we found a matching facet. 
- rv.add((facet, self[facet])) - break - - return (frozenset(rv)) - - def pop(self, item, *args, **kwargs): - """pop() only operates on local facets.""" - - assert len(args) == 0 or (len(args) == 1 and - "default" not in kwargs) - - if item not in self.__local: - # check if the user specified a default value - if args: - return args[0] - elif "default" in kwargs: - return kwargs["default"] - if len(self) == 0: - raise KeyError('pop(): dictionary is empty') - raise KeyError(item) - - return self.__delitem_internal(item, inherited=False) - - def popitem(self): - """popitem() only operates on local facets.""" - - item = None - for item, value in self.__local: - break - - if item is None: - raise KeyError('popitem(): dictionary is empty') - - self.__delitem_internal(item) - return (item, value) - - def setdefault(self, item, default=None): - if item not in self: - self[item] = default - return self[item] - - def update(self, d): - if type(d) == Facets: - # preserve inherited facets. - for k, v in six.iteritems(d.__inherited): - self._set_inherited(k, v) - for k, v in six.iteritems(d.__local): - self[k] = v - return - - for k in d: - self[k] = d[k] - - def keys(self): - return self.__keylist[:] - - def values(self): - return [self[k] for k in self.__keylist] - - def _src_values(self, name): - """A facet may be set via multiple sources and hence have - multiple values. If there are multiple values for a facet, - all but one of those values will be masked. So for a given - facet, return a list of tuples of the form (, , - ) which represent all currently set values for this - facet.""" - - rv = [] - if name in self.__inherited: - src = self.FACET_SRC_PARENT - value = self.__inherited[name] - masked = False - rv.append((value, src, masked)) - if name in self.__local: - src = self.FACET_SRC_LOCAL - value = self.__local[name] - masked = False - if name in self.__inherited: - masked = True - rv.append((value, src, masked)) - return rv - - def items(self): - return [a for a in self.iteritems()] - - def iteritems(self): # return in sorted order for display - for k in self.__keylist: - yield k, self[k] - - def copy(self): - return Facets(self) - - def clear(self): - self.__keylist = [] - self.__res = {} - self.__local = {} - self.__local_ro = None - self.__inherited = {} - self.__inherited_ro = None - dict.clear(self) - - def _match_src(self, name): - """Report the source of a facet value if we were to attempt to - look it up in the current Facets object dictionary.""" - - k = self.__getitem_internal(name)[0] - if k in self.__inherited: - return self.FACET_SRC_PARENT - if k in self.__local: - return self.FACET_SRC_LOCAL - assert k is None and k not in self - return self.FACET_SRC_SYSTEM - - # For convenience, provide callers with direct access to local and - # parent facets via cached read-only dictionaries. - @property - def local(self): - if self.__local_ro is None: - self.__local_ro = ImmutableDict(self.__local) - return self.__local_ro - - @property - def inherited(self): - if self.__inherited_ro is None: - self.__inherited_ro = ImmutableDict(self.__inherited) - return self.__inherited_ro - - - if six.PY3: - def allow_action(self, action, publisher=None): - return _allow_facet(self, action, publisher=publisher) + # store information on facets; subclass dict + # and maintain ordered list of keys sorted + # by length. 
+
+    # subclass __getitem__ so that queries w/
+    # actual facets find match
+
+    #
+    # For image planning purposes and to be able to compare facet objects
+    # deterministically, facets must be sorted. They are first sorted by
+    # source (more details below), then by length, then lexically.
+    #
+    # Facets can come from three different sources.
+    #
+    # SYSTEM facets are facets whose values are assigned by the system.
+    # These are usually facets defined in packages which are not set in an
+    # image, and the value assigned by the system is always true. These
+    # facets will usually never be found in a Facets dictionary. (Facets
+    # dictionaries only contain facets which are explicitly set.)
+    #
+    # LOCAL facets are facets which have been set locally in an image
+    # using pkg(1) or the pkg api. Explicitly set LOCAL facets are stored
+    # in Facets.__local. Facets which are not explicitly set but match an
+    # explicitly set LOCAL facet glob pattern are also considered to be
+    # LOCAL.
+    #
+    # PARENT facets are facets which are inherited from a parent image.
+    # They are managed internally by the packaging subsystem. Explicitly
+    # inherited facets are stored in Facets.__inherited. Facets which are
+    # not explicitly set but match an explicitly set PARENT facet glob
+    # pattern are also considered to be PARENT.
+    #
+    # When evaluating facets, all PARENT facets are evaluated before LOCAL
+    # facets. This is done by ensuring that all PARENT facets come before
+    # any LOCAL facets in __keylist. This is done because PARENT facets
+    # exist to propagate faceted dependencies between linked images, which
+    # is needed to ensure the solver can run successfully. I.e., if a
+    # parent image relaxes dependencies via facet version-locks, then the
+    # child needs to inherit those facets since otherwise it is more
+    # constrained in possible solutions than its parent and likely won't
+    # be able to plan an update that keeps it in sync with its parent.
+    #
+    # Since PARENT facets take priority over LOCAL facets, it's possible to
+    # have conflicts between the two. In the case where a facet is both
+    # inherited and set locally, both values are preserved, but the
+    # inherited value masks the local value. Users can list and update
+    # local values while they are masked using pkg(1), but as long as the
+    # values are masked they will not affect image planning operations.
+    # Once an inherited facet that masks a local facet is removed, the
+    # local facet will be restored.
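To make the masking behaviour described above concrete, here is a minimal, hypothetical sketch (not part of this change) of how a PARENT facet masks a LOCAL one. It assumes the class is importable as pkg.facet.Facets and uses the _set_inherited()/_clear_inherited() helpers defined later in the class; the facet name is invented.

import pkg.facet as facet

f = facet.Facets()
f["facet.doc.man"] = False                 # LOCAL value, e.g. set via pkg(1)
f._set_inherited("facet.doc.man", True)    # PARENT value from a parent image
assert f["facet.doc.man"] is True          # the inherited value masks the local one
assert f.local["facet.doc.man"] is False   # the local value is preserved, just masked
f._clear_inherited()
assert f["facet.doc.man"] is False         # removing the inherited facet restores it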
+    #
+
+    FACET_SRC_SYSTEM = "system"
+    FACET_SRC_LOCAL = "local"
+    FACET_SRC_PARENT = "parent"
+
+    def __init__(self, init=EmptyI):
+        dict.__init__(self)
+        self.__keylist = []
+        self.__res = {}
+        self.__local = {}
+        self.__local_ro = None
+        self.__inherited = {}
+        self.__inherited_ro = None
+
+        # initialize ourselves
+        self.update(init)
+
+    @staticmethod
+    def getstate(obj, je_state=None):
+        """Returns the serialized state of this object in a format
+        that can be easily stored using JSON, pickle, etc."""
+
+        return [
+            [misc.force_text(k), v, True]
+            for k, v in six.iteritems(obj.__inherited)
+        ] + [
+            [misc.force_text(k), v, False]
+            for k, v in six.iteritems(obj.__local)
+        ]
+
+    @staticmethod
+    def fromstate(state, jd_state=None):
+        """Update the state of this object using previously serialized
+        state obtained via getstate()."""
+
+        rv = Facets()
+        for k, v, inherited in state:
+            if not inherited:
+                rv[k] = v
+            else:
+                rv._set_inherited(k, v)
+        return rv
+
+    def _cmp_priority(self, other):
+        """Compare the facet match priority of two Facets objects.
+        Since the match priority of a Facets object is dependent upon
+        facet sources (local vs parent) and names, we're essentially
+        ensuring that both objects have the same set of facet sources
+        and names."""
+
+        assert type(other) is Facets
+        return misc.cmp(self.__keylist, other.__keylist)
+
+    def _cmp_values(self, other):
+        """Compare the facet values of two Facets objects. This
+        comparison ignores any masked values."""
+
+        assert type(other) is Facets
+        return misc.cmp(self, other)
+
+    def _cmp_all_values(self, other):
+        """Compare all the facet values of two Facets objects. This
+        comparison takes masked values into account."""
+
+        assert type(other) is Facets
+        rv = misc.cmp(self.__inherited, other.__inherited)
+        if rv == 0:
+            rv = misc.cmp(self.__local, other.__local)
+        return rv
+
+    # this __cmp__ is used as a helper function for the rich comparison
+    # methods.
+    # __cmp__ defined; pylint: disable=W1630
+    def __cmp__(self, other):
+        """Compare two Facets objects. This comparison takes masked
+        values into account."""
+
+        # check if we're getting compared against something other than
+        # another Facets object.
+        if type(other) is not Facets:
+            return 1
+
+        # Check for effective facet value changes that could affect
+        # solver computations.
+        rv = self._cmp_values(other)
+        if rv != 0:
+            return rv
+
+        # Check for facet priority changes that could affect solver
+        # computations. (Priority changes can occur when local or
+        # inherited facets are added or removed.)
+        rv = self._cmp_priority(other)
+        if rv != 0:
+            return rv
+
+        # There are no outwardly visible facet priority or value
+        # changes that could affect solver computations, but it's
+        # still possible that we're changing the set of local or
+        # inherited facets in a way that doesn't affect solver
+        # computations. For example: we could be adding a local
+        # facet with a value that is masked by an inherited facet, or
+        # having a facet transition from being inherited to being
+        # local without a priority or value change. Check if this is
+        # the case.
+        rv = self._cmp_all_values(other)
+        return rv
+
+    def __hash__(self):
+        return hash(str(self))
+
+    def __eq__(self, other):
+        """redefine in terms of __cmp__()"""
+        return Facets.__cmp__(self, other) == 0
+
+    def __ne__(self, other):
+        """redefine in terms of __cmp__()"""
+        return Facets.__cmp__(self, other) != 0
+
+    def __ge__(self, other):
+        """redefine in terms of __cmp__()"""
+        return Facets.__cmp__(self, other) >= 0
+
+    def __gt__(self, other):
+        """redefine in terms of __cmp__()"""
+        return Facets.__cmp__(self, other) > 0
+
+    def __le__(self, other):
+        """redefine in terms of __cmp__()"""
+        return Facets.__cmp__(self, other) <= 0
+
+    def __lt__(self, other):
+        """redefine in terms of __cmp__()"""
+        return Facets.__cmp__(self, other) < 0
+
+    def __repr__(self):
+        s = "<"
+        s += ", ".join(
+            [
+                "{0}:{1}".format(k, dict.__getitem__(self, k))
+                for k in self.__keylist
+            ]
+        )
+        s += ">"
+
+        return s
+
+    def __keylist_sort(self):
+        """Update __keylist, which is used to determine facet matching
+        order. Inherited facets always take priority over local
+        facets so make sure all inherited facets come before local
+        facets in __keylist. All facets from a given source are
+        sorted by length, and facets of equal length are sorted
+        lexically."""
+
+        def facet_sort(x, y):
+            i = len(y) - len(x)
+            if i != 0:
+                return i
+            return misc.cmp(x, y)
+
+        self.__keylist = []
+        self.__keylist += sorted(
+            [i for i in self if i in self.__inherited],
+            key=cmp_to_key(facet_sort),
+        )
+        self.__keylist += sorted(
+            [i for i in self if i not in self.__inherited],
+            key=cmp_to_key(facet_sort),
+        )
+
+    def __setitem_internal(self, item, value, inherited=False):
+        if not item.startswith("facet."):
+            raise KeyError('key must start with "facet".')
+
+        if not (value == True or value == False):
+            raise ValueError("value must be boolean")
+
+        keylist_sort = False
+        if (inherited and item not in self.__inherited) or (
+            not inherited and item not in self
+        ):
+            keylist_sort = True
+
+        # save the facet in the local or inherited dictionary
+        # clear the corresponding read-only dictionary
+        if inherited:
+            self.__inherited[item] = value
+            self.__inherited_ro = None
+        else:
+            self.__local[item] = value
+            self.__local_ro = None
+
+        # Inherited facets always take priority over local facets.
+        if inherited or item not in self.__inherited:
+            dict.__setitem__(self, item, value)
+            self.__res[item] = re.compile(fnmatch.translate(item))
+
+        if keylist_sort:
+            self.__keylist_sort()
+
+    def __setitem__(self, item, value):
+        """__setitem__ only operates on local facets."""
+        self.__setitem_internal(item, value)
+
+    def __getitem_internal(self, item):
+        """Implement facet lookup algorithm here
+
+        Note that _allow_facet bypasses __getitem__ for performance
+        reasons; if __getitem__ changes, _allow_facet in _varcet.c
+        must also be updated.
+
+        We return a tuple of the form (<key>, <value>) where key is
+        the explicitly set facet name (which may be a glob pattern)
+        that matched the caller specific facet name."""
+
+        if not item.startswith("facet."):
+            raise KeyError("key must start w/ facet.")
+
+        if item in self:
+            return item, dict.__getitem__(self, item)
+        for k in self.__keylist:
+            if self.__res[k].match(item):
+                return k, dict.__getitem__(self, k)
+
+        # The trailing '.' is to encourage namespace usage.
+        if item.startswith("facet.debug.") or item.startswith(
+            "facet.optional."
+ ): + return None, False # exclude by default + return None, True # be inclusive + + def __getitem__(self, item): + return self.__getitem_internal(item)[1] + + def __delitem_internal(self, item, inherited=False): + # check for an attempt to delete an invalid facet + if not dict.__contains__(self, item): + raise KeyError(item) + + # check for an attempt to delete an invalid local facet + if not inherited and item not in self.__local: + raise KeyError(item) + + # we should never try to delete an invalid inherited facet + assert not inherited or item in self.inherited + + keylist_sort = False + if inherited and item in self.__local: + # the inherited value was overriding a local value + # that should now be exposed + dict.__setitem__(self, item, self.__local[item]) + self.__res[item] = re.compile(fnmatch.translate(item)) + keylist_sort = True + else: + # delete the item + dict.__delitem__(self, item) + del self.__res[item] + self.__keylist.remove(item) + + # delete item from the local or inherited dictionary + # clear the corresponding read-only dictionary + if inherited: + rv = self.__inherited[item] + del self.__inherited[item] + self.__inherited_ro = None + else: + rv = self.__local[item] + del self.__local[item] + self.__local_ro = None + + if keylist_sort: + self.__keylist_sort() + return rv + + def __delitem__(self, item): + """__delitem__ only operates on local facets.""" + self.__delitem_internal(item) + + # allow_action is provided as a native function (see end of class + # declaration). + + def _set_inherited(self, item, value): + """Set an inherited facet.""" + self.__setitem_internal(item, value, inherited=True) + + def _clear_inherited(self): + """Clear all inherited facet.""" + for k in list(self.__inherited.keys()): + self.__delitem_internal(k, inherited=True) + + def _action_match(self, act): + """Find the subset of facet key/values pairs which match any + facets present on an action.""" + + # find all the facets present in the current action + action_facets = frozenset( + [a for a in act.attrs if a.startswith("facet.")] + ) + + rv = set() + for facet in self.__keylist: + if facet in action_facets: + # we found a matching facet. + rv.add((facet, self[facet])) + continue + for action_facet in action_facets: + if self.__res[facet].match(action_facet): + # we found a matching facet. + rv.add((facet, self[facet])) + break + + return frozenset(rv) + + def pop(self, item, *args, **kwargs): + """pop() only operates on local facets.""" + + assert len(args) == 0 or (len(args) == 1 and "default" not in kwargs) + + if item not in self.__local: + # check if the user specified a default value + if args: + return args[0] + elif "default" in kwargs: + return kwargs["default"] + if len(self) == 0: + raise KeyError("pop(): dictionary is empty") + raise KeyError(item) + + return self.__delitem_internal(item, inherited=False) + + def popitem(self): + """popitem() only operates on local facets.""" + + item = None + for item, value in self.__local: + break + + if item is None: + raise KeyError("popitem(): dictionary is empty") + + self.__delitem_internal(item) + return (item, value) + + def setdefault(self, item, default=None): + if item not in self: + self[item] = default + return self[item] + + def update(self, d): + if type(d) == Facets: + # preserve inherited facets. 
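As a quick, hypothetical illustration of the lookup rules in __getitem_internal() above (the longest explicitly set key wins, and unset facets default to true except under the facet.debug. and facet.optional. namespaces), again assuming a pkg.facet.Facets instance and invented facet names:

f = facet.Facets()
f["facet.locale.*"] = False        # glob pattern covering all locale facets
f["facet.locale.en_US"] = True     # longer, more specific key

f["facet.locale.en_US"]    # True  - explicitly set key matches directly
f["facet.locale.de"]       # False - matched by the facet.locale.* pattern
f["facet.devel"]           # True  - unset facets are included by default
f["facet.debug.foo"]       # False - facet.debug.* is excluded by default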
+ for k, v in six.iteritems(d.__inherited): + self._set_inherited(k, v) + for k, v in six.iteritems(d.__local): + self[k] = v + return + + for k in d: + self[k] = d[k] + + def keys(self): + return self.__keylist[:] + + def values(self): + return [self[k] for k in self.__keylist] + + def _src_values(self, name): + """A facet may be set via multiple sources and hence have + multiple values. If there are multiple values for a facet, + all but one of those values will be masked. So for a given + facet, return a list of tuples of the form (, , + ) which represent all currently set values for this + facet.""" + + rv = [] + if name in self.__inherited: + src = self.FACET_SRC_PARENT + value = self.__inherited[name] + masked = False + rv.append((value, src, masked)) + if name in self.__local: + src = self.FACET_SRC_LOCAL + value = self.__local[name] + masked = False + if name in self.__inherited: + masked = True + rv.append((value, src, masked)) + return rv + + def items(self): + return [a for a in self.iteritems()] + + def iteritems(self): # return in sorted order for display + for k in self.__keylist: + yield k, self[k] + + def copy(self): + return Facets(self) + + def clear(self): + self.__keylist = [] + self.__res = {} + self.__local = {} + self.__local_ro = None + self.__inherited = {} + self.__inherited_ro = None + dict.clear(self) + + def _match_src(self, name): + """Report the source of a facet value if we were to attempt to + look it up in the current Facets object dictionary.""" + + k = self.__getitem_internal(name)[0] + if k in self.__inherited: + return self.FACET_SRC_PARENT + if k in self.__local: + return self.FACET_SRC_LOCAL + assert k is None and k not in self + return self.FACET_SRC_SYSTEM + + # For convenience, provide callers with direct access to local and + # parent facets via cached read-only dictionaries. + @property + def local(self): + if self.__local_ro is None: + self.__local_ro = ImmutableDict(self.__local) + return self.__local_ro + + @property + def inherited(self): + if self.__inherited_ro is None: + self.__inherited_ro = ImmutableDict(self.__inherited) + return self.__inherited_ro + + if six.PY3: + + def allow_action(self, action, publisher=None): + return _allow_facet(self, action, publisher=publisher) + if six.PY2: - Facets.allow_action = types.MethodType(_allow_facet, None, Facets) + Facets.allow_action = types.MethodType(_allow_facet, None, Facets) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/file_layout/__init__.py b/src/modules/file_layout/__init__.py index d09ad6537..7d0b8aa17 100644 --- a/src/modules/file_layout/__init__.py +++ b/src/modules/file_layout/__init__.py @@ -25,9 +25,8 @@ # Use is subject to license terms. # -__all__ = [ -] - +__all__ = [] + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/file_layout/file_manager.py b/src/modules/file_layout/file_manager.py index 5de60acab..070e067ce 100644 --- a/src/modules/file_layout/file_manager.py +++ b/src/modules/file_layout/file_manager.py @@ -47,335 +47,327 @@ import pkg.portable as portable import pkg.file_layout.layout as layout + class NeedToModifyReadOnlyFileManager(apx.ApiException): - """This exception is raised when the caller attempts to modify a - read-only FileManager.""" + """This exception is raised when the caller attempts to modify a + read-only FileManager.""" - def __init__(self, thing_to_change, create="create"): - """Create a NeedToModifyReadOnlyFileManager exception. 
+ def __init__(self, thing_to_change, create="create"): + """Create a NeedToModifyReadOnlyFileManager exception. - The "thing_to_change" parameter is the entity that the file - manager was asked to modify. + The "thing_to_change" parameter is the entity that the file + manager was asked to modify. - The "create" parameter describes what kind of modification - was being attempted.""" + The "create" parameter describes what kind of modification + was being attempted.""" - apx.ApiException.__init__(self) - self.ent = thing_to_change - self.create = create + apx.ApiException.__init__(self) + self.ent = thing_to_change + self.create = create - def __str__(self): - return _("The FileManager cannot {cre} {ent} because it " - "is configured read-only.").format( - cre=self.create, ent=self.ent) + def __str__(self): + return _( + "The FileManager cannot {cre} {ent} because it " + "is configured read-only." + ).format(cre=self.create, ent=self.ent) class FMInsertionFailure(apx.ApiException): - """Used to indicate that an in-progress insert failed because the - item to be inserted went missing during the operation and wasn't - already found in the cache.""" + """Used to indicate that an in-progress insert failed because the + item to be inserted went missing during the operation and wasn't + already found in the cache.""" - def __init__(self, src, dest): - apx.ApiException.__init__(self) - self.src = src - self.dest = dest + def __init__(self, src, dest): + apx.ApiException.__init__(self) + self.src = src + self.dest = dest - def __str__(self): - return _("{src} was removed while FileManager was attempting " - "to insert it into the cache as {dest}.").format( - **self.__dict__) + def __str__(self): + return _( + "{src} was removed while FileManager was attempting " + "to insert it into the cache as {dest}." + ).format(**self.__dict__) class FMPermissionsException(apx.PermissionsException): - """This exception is raised when a FileManager does not have the - permissions to operate as needed on the file system.""" + """This exception is raised when a FileManager does not have the + permissions to operate as needed on the file system.""" - def __str__(self): - return _("FileManager was unable to create {0} or the " - "directories containing it.").format(self.path) + def __str__(self): + return _( + "FileManager was unable to create {0} or the " + "directories containing it." + ).format(self.path) class UnrecognizedFilePaths(apx.ApiException): - """This exception is raised when files are found under the FileManager's - root which cannot be accounted for.""" + """This exception is raised when files are found under the FileManager's + root which cannot be accounted for.""" - def __init__(self, filepaths): - apx.ApiException.__init__(self) - self.fps = filepaths + def __init__(self, filepaths): + apx.ApiException.__init__(self) + self.fps = filepaths - def __str__(self): - return _("The following paths were found but cannot be " - "accounted for by any of the known layouts:\n{0}").format( - "\n".join(self.fps)) + def __str__(self): + return _( + "The following paths were found but cannot be " + "accounted for by any of the known layouts:\n{0}" + ).format("\n".join(self.fps)) class FileManager(object): - """The FileManager class handles the insertion and removal of files - within its directory according to a strategy for organizing the - files.""" - - def __init__(self, root, readonly, layouts=None): - """Initialize the FileManager object. - - The "root" parameter is a path to the directory to manage. 
- - The "readonly" parameter determines whether files can be - inserted, removed, or moved.""" - - if not root: - raise ValueError("root must not be none") - self.root = root - self.readonly = readonly - if layouts is not None: - if not isinstance(layouts, collections.abc.Iterable): - layouts = [layouts] - self.layouts = layouts + """The FileManager class handles the insertion and removal of files + within its directory according to a strategy for organizing the + files.""" + + def __init__(self, root, readonly, layouts=None): + """Initialize the FileManager object. + + The "root" parameter is a path to the directory to manage. + + The "readonly" parameter determines whether files can be + inserted, removed, or moved.""" + + if not root: + raise ValueError("root must not be none") + self.root = root + self.readonly = readonly + if layouts is not None: + if not isinstance(layouts, collections.abc.Iterable): + layouts = [layouts] + self.layouts = layouts + else: + self.layouts = layout.get_default_layouts() + + def set_read_only(self): + """Make the FileManager read only.""" + self.readonly = True + + def __select_path(self, hashval, check_existence): + """Find the path to the file with name hashval. + + The "hashval" parameter is the name of the file to find. + + The "check_existence" parameter determines whether the function + will ensure that a file exists at the returned path.""" + + cur_path = None + cur_full_path = None + dest_full_path = None + for l in self.layouts: + cur_path = l.lookup(hashval) + cur_full_path = os.path.join(self.root, cur_path) + # The first layout in self.layouts is the desired + # location. If that location has not been stored, + # record it. + if dest_full_path is None: + dest_full_path = cur_full_path + if not check_existence or os.path.exists(cur_full_path): + return cur_full_path, dest_full_path + return None, dest_full_path + + def lookup(self, hashval, opener=False, check_existence=True): + """Find the file for hashval. + + The "hashval" parameter contains the name of the file to be + found. + + The "opener" parameter determines whether the function will + return a path or an open file handle.""" + + cur_full_path, dest_full_path = self.__select_path( + hashval, check_existence + ) + if not cur_full_path: + return None + + # If the depot isn't readonly and the file isn't in the location + # that the primary layout thinks it should be, try to move the + # file into the right place. + if dest_full_path != cur_full_path and not self.readonly: + p_sdir = os.path.dirname(cur_full_path) + try: + # Attempt to move the file from the old location + # to the preferred location. + try: + portable.rename(cur_full_path, dest_full_path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + p_ddir = os.path.dirname(dest_full_path) + if os.path.isdir(p_ddir): + raise + + try: + os.makedirs(p_ddir) + except EnvironmentError as e: + if e.errno == errno.EACCES or e.errno == errno.EROFS: + raise FMPermissionsException(e.filename) + # If directory creation failed + # due to EEXIST, but the entry + # it failed for isn't the + # immediate parent, assume + # there's a larger problem + # and re-raise the exception. + # For file_manager, this is + # believed to be unlikely. + if not ( + e.errno == errno.EEXIST and e.filename == p_ddir + ): + raise + + portable.rename(cur_full_path, dest_full_path) + + # Since the file has been moved, point at the + # new destination *before* attempting to remove + # the (now possibly empty) parent directory of + # of the source file. 
+ cur_full_path = dest_full_path + + # This may fail because other files can still + # exist in the parent path for the source, so + # must be done last. + os.removedirs(p_sdir) + except EnvironmentError: + # If there's an error during these operations, + # check that cur_full_path still exists. If + # it's gone, return None. + if not os.path.exists(cur_full_path): + return None + + if opener: + return open(cur_full_path, "rb") + return cur_full_path + + def copy(self, hashval, src_path): + """Copy the content at "src_path" to the files under the name + "hashval". Returns the path to the copied file.""" + return self.__place(hashval, src_path, portable.copyfile) + + def insert(self, hashval, src_path): + """Add the content at "src_path" to the files under the name + "hashval". Returns the path to the inserted file.""" + return self.__place(hashval, src_path, portable.rename) + + def __place(self, hashval, src_path, pfunc): + """Add the content at "src_path" to the files under the name + "hashval". Returns the path to the inserted file.""" + + if self.readonly: + raise NeedToModifyReadOnlyFileManager(hashval) + cur_full_path, dest_full_path = self.__select_path(hashval, True) + + if cur_full_path and cur_full_path != dest_full_path: + # The file is stored in an old location and needs to be + # moved to a new location. To prevent disruption of + # service or other race conditions, rename the source + # file into the old place first. + try: + portable.rename(src_path, cur_full_path) + except EnvironmentError as e: + if e.errno == errno.EACCES or e.errno == errno.EROFS: + raise FMPermissionsException(e.filename) + raise + src_path = cur_full_path + + while True: + try: + # Place the file. + pfunc(src_path, dest_full_path) + except EnvironmentError as e: + p_dir = os.path.dirname(dest_full_path) + if e.errno == errno.ENOENT and not os.path.isdir(p_dir): + try: + os.makedirs(p_dir) + except EnvironmentError as e: + if e.errno == errno.EACCES or e.errno == errno.EROFS: + raise FMPermissionsException(e.filename) + # If directory creation failed + # due to EEXIST, but the entry + # it failed for isn't the + # immediate parent, assume + # there's a larger problem and + # re-raise the exception. For + # file_manager, this is believed + # to be unlikely. + if not ( + e.errno == errno.EEXIST and e.filename == p_dir + ): + raise + + # Parent directory created successsfully + # so loop again to retry place. + elif e.errno == errno.ENOENT and not os.path.exists(src_path): + if os.path.exists(dest_full_path): + # Item has already been moved + # into cache by another process; + # nothing more to do. (This + # could happen during parallel + # publication.) + return dest_full_path + raise FMInsertionFailure(src_path, dest_full_path) + elif e.errno == errno.EACCES or e.errno == errno.EROFS: + raise FMPermissionsException(e.filename) + elif e.errno != errno.ENOENT: + raise apx._convert_error(e) + else: + # Success! + break + + # Attempt to remove the parent directory of the file's original + # location to ensure empty directories aren't left behind. 
+ if cur_full_path: + try: + os.removedirs(os.path.dirname(cur_full_path)) + except EnvironmentError as e: + if e.errno == errno.ENOENT or e.errno == errno.EEXIST: + pass + elif e.errno == errno.EACCES or e.errno == errno.EROFS: + raise FMPermissionsException(e.filename) else: - self.layouts = layout.get_default_layouts() - - def set_read_only(self): - """Make the FileManager read only.""" - self.readonly = True - - def __select_path(self, hashval, check_existence): - """Find the path to the file with name hashval. - - The "hashval" parameter is the name of the file to find. + raise + + # Return the location of the placed file to the caller. + return dest_full_path + + def remove(self, hashval): + """This function removes the file associated with the name + "hashval".""" + + if self.readonly: + raise NeedToModifyReadOnlyFileManager(hashval, "remove") + for l in self.layouts: + cur_path = l.lookup(hashval) + cur_full_path = os.path.join(self.root, cur_path) + try: + portable.remove(cur_full_path) + os.removedirs(os.path.dirname(cur_full_path)) + except EnvironmentError as e: + if e.errno == errno.ENOENT or e.errno == errno.EEXIST: + pass + elif e.errno == errno.EACCES or e.errno == errno.EROFS: + raise FMPermissionsException(e.filename) + else: + raise - The "check_existence" parameter determines whether the function - will ensure that a file exists at the returned path.""" + def walk(self): + """Generate all the hashes of all files known.""" - cur_path = None - cur_full_path = None - dest_full_path = None - for l in self.layouts: - cur_path = l.lookup(hashval) - cur_full_path = os.path.join(self.root, cur_path) - # The first layout in self.layouts is the desired - # location. If that location has not been stored, - # record it. - if dest_full_path is None: - dest_full_path = cur_full_path - if not check_existence or os.path.exists(cur_full_path): - return cur_full_path, dest_full_path - return None, dest_full_path - - def lookup(self, hashval, opener=False, check_existence=True): - """Find the file for hashval. - - The "hashval" parameter contains the name of the file to be - found. - - The "opener" parameter determines whether the function will - return a path or an open file handle.""" - - cur_full_path, dest_full_path = self.__select_path(hashval, - check_existence) - if not cur_full_path: - return None - - # If the depot isn't readonly and the file isn't in the location - # that the primary layout thinks it should be, try to move the - # file into the right place. - if dest_full_path != cur_full_path and not self.readonly: - p_sdir = os.path.dirname(cur_full_path) - try: - # Attempt to move the file from the old location - # to the preferred location. - try: - portable.rename(cur_full_path, - dest_full_path) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - p_ddir = os.path.dirname( - dest_full_path) - if os.path.isdir(p_ddir): - raise - - try: - os.makedirs(p_ddir) - except EnvironmentError as e: - if e.errno == errno.EACCES or \ - e.errno == errno.EROFS: - raise FMPermissionsException( - e.filename) - # If directory creation failed - # due to EEXIST, but the entry - # it failed for isn't the - # immediate parent, assume - # there's a larger problem - # and re-raise the exception. - # For file_manager, this is - # believed to be unlikely. 
- if not (e.errno == errno.EEXIST and - e.filename == p_ddir): - raise - - portable.rename(cur_full_path, - dest_full_path) - - # Since the file has been moved, point at the - # new destination *before* attempting to remove - # the (now possibly empty) parent directory of - # of the source file. - cur_full_path = dest_full_path - - # This may fail because other files can still - # exist in the parent path for the source, so - # must be done last. - os.removedirs(p_sdir) - except EnvironmentError: - # If there's an error during these operations, - # check that cur_full_path still exists. If - # it's gone, return None. - if not os.path.exists(cur_full_path): - return None - - if opener: - return open(cur_full_path, "rb") - return cur_full_path - - def copy(self, hashval, src_path): - """Copy the content at "src_path" to the files under the name - "hashval". Returns the path to the copied file.""" - return self.__place(hashval, src_path, portable.copyfile) - - def insert(self, hashval, src_path): - """Add the content at "src_path" to the files under the name - "hashval". Returns the path to the inserted file.""" - return self.__place(hashval, src_path, portable.rename) - - def __place(self, hashval, src_path, pfunc): - """Add the content at "src_path" to the files under the name - "hashval". Returns the path to the inserted file.""" - - if self.readonly: - raise NeedToModifyReadOnlyFileManager(hashval) - cur_full_path, dest_full_path = \ - self.__select_path(hashval, True) - - if cur_full_path and cur_full_path != dest_full_path: - # The file is stored in an old location and needs to be - # moved to a new location. To prevent disruption of - # service or other race conditions, rename the source - # file into the old place first. - try: - portable.rename(src_path, cur_full_path) - except EnvironmentError as e: - if e.errno == errno.EACCES or \ - e.errno == errno.EROFS: - raise FMPermissionsException(e.filename) - raise - src_path = cur_full_path - - while True: - try: - # Place the file. - pfunc(src_path, dest_full_path) - except EnvironmentError as e: - p_dir = os.path.dirname(dest_full_path) - if e.errno == errno.ENOENT and \ - not os.path.isdir(p_dir): - try: - os.makedirs(p_dir) - except EnvironmentError as e: - if e.errno == errno.EACCES or \ - e.errno == errno.EROFS: - raise FMPermissionsException( - e.filename) - # If directory creation failed - # due to EEXIST, but the entry - # it failed for isn't the - # immediate parent, assume - # there's a larger problem and - # re-raise the exception. For - # file_manager, this is believed - # to be unlikely. - if not (e.errno == errno.EEXIST - and e.filename == p_dir): - raise - - # Parent directory created successsfully - # so loop again to retry place. - elif e.errno == errno.ENOENT and \ - not os.path.exists(src_path): - if os.path.exists(dest_full_path): - # Item has already been moved - # into cache by another process; - # nothing more to do. (This - # could happen during parallel - # publication.) - return dest_full_path - raise FMInsertionFailure(src_path, - dest_full_path) - elif e.errno == errno.EACCES or \ - e.errno == errno.EROFS: - raise FMPermissionsException(e.filename) - elif e.errno != errno.ENOENT: - raise apx._convert_error(e) - else: - # Success! - break - - # Attempt to remove the parent directory of the file's original - # location to ensure empty directories aren't left behind. 
- if cur_full_path: - try: - os.removedirs(os.path.dirname(cur_full_path)) - except EnvironmentError as e: - if e.errno == errno.ENOENT or \ - e.errno == errno.EEXIST: - pass - elif e.errno == errno.EACCES or \ - e.errno == errno.EROFS: - raise FMPermissionsException(e.filename) - else: - raise - - # Return the location of the placed file to the caller. - return dest_full_path - - def remove(self, hashval): - """This function removes the file associated with the name - "hashval".""" - - if self.readonly: - raise NeedToModifyReadOnlyFileManager(hashval, - "remove") + unrecognized = [] + for dirpath, dirnames, filenames in os.walk(self.root): + for fn in filenames: + fp = os.path.join(dirpath, fn) + fp = fp[len(self.root) :].lstrip(os.path.sep) for l in self.layouts: - cur_path = l.lookup(hashval) - cur_full_path = os.path.join(self.root, cur_path) - try: - portable.remove(cur_full_path) - os.removedirs(os.path.dirname(cur_full_path)) - except EnvironmentError as e: - if e.errno == errno.ENOENT or \ - e.errno == errno.EEXIST: - pass - elif e.errno == errno.EACCES or \ - e.errno == errno.EROFS: - raise FMPermissionsException(e.filename) - else: - raise - - def walk(self): - """Generate all the hashes of all files known.""" - - unrecognized = [] - for dirpath, dirnames, filenames in os.walk(self.root): - for fn in filenames: - fp = os.path.join(dirpath, fn) - fp = fp[len(self.root):].lstrip(os.path.sep) - for l in self.layouts: - if l.contains(fp, fn): - yield l.path_to_hash(fp) - break - else: - unrecognized.append(fp) - if unrecognized: - raise UnrecognizedFilePaths(unrecognized) + if l.contains(fp, fn): + yield l.path_to_hash(fp) + break + else: + unrecognized.append(fp) + if unrecognized: + raise UnrecognizedFilePaths(unrecognized) + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/file_layout/layout.py b/src/modules/file_layout/layout.py index 2c61dd85d..9cd5c6ffa 100644 --- a/src/modules/file_layout/layout.py +++ b/src/modules/file_layout/layout.py @@ -60,60 +60,63 @@ import os + class Layout(object): - """This class is the parent class to all layouts. It defines the - interface which those subclasses must satisfy.""" - - def lookup(self, hashval): - """Return the path to the file with name "hashval".""" - raise NotImplementedError + """This class is the parent class to all layouts. It defines the + interface which those subclasses must satisfy.""" + + def lookup(self, hashval): + """Return the path to the file with name "hashval".""" + raise NotImplementedError - def path_to_hash(self, path): - """Return the hash which would map to "path".""" - raise NotImplementedError + def path_to_hash(self, path): + """Return the hash which would map to "path".""" + raise NotImplementedError - def contains(self, rel_path, file_name): - """Returns whether this layout would place a file named - "file_name" at "rel_path".""" - return self.lookup(file_name) == rel_path + def contains(self, rel_path, file_name): + """Returns whether this layout would place a file named + "file_name" at "rel_path".""" + return self.lookup(file_name) == rel_path class V0Layout(Layout): - """This class implements the original layout used. It uses a 256 way - split (2 hex digits) followed by a 16.7M way split (6 hex digits).""" + """This class implements the original layout used. 
It uses a 256 way + split (2 hex digits) followed by a 16.7M way split (6 hex digits).""" - def lookup(self, hashval): - """Return the path to the file with name "hashval".""" - return os.path.join(hashval[0:2], hashval[2:8], hashval) + def lookup(self, hashval): + """Return the path to the file with name "hashval".""" + return os.path.join(hashval[0:2], hashval[2:8], hashval) - def path_to_hash(self, path): - """Return the hash which would map to "path".""" - return os.path.basename(path) + def path_to_hash(self, path): + """Return the hash which would map to "path".""" + return os.path.basename(path) class V1Layout(Layout): - """This class implements the new layout approach which is a single 256 - way fanout using the first two digits of the hash.""" + """This class implements the new layout approach which is a single 256 + way fanout using the first two digits of the hash.""" - def lookup(self, hashval): - """Return the path to the file with name "hashval".""" - return os.path.join(hashval[0:2], hashval) + def lookup(self, hashval): + """Return the path to the file with name "hashval".""" + return os.path.join(hashval[0:2], hashval) - def path_to_hash(self, path): - """Return the hash which would map to "path".""" - return os.path.basename(path) + def path_to_hash(self, path): + """Return the hash which would map to "path".""" + return os.path.basename(path) def get_default_layouts(): - """This function describes the default order in which to use the - layouts defined above.""" + """This function describes the default order in which to use the + layouts defined above.""" + + return [V1Layout(), V0Layout()] - return [V1Layout(), V0Layout()] def get_preferred_layout(): - """This function returns the single preferred layout to use.""" + """This function returns the single preferred layout to use.""" + + return V1Layout() - return V1Layout() # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/flavor/__init__.py b/src/modules/flavor/__init__.py index 26eb1a53d..0e27dffa7 100644 --- a/src/modules/flavor/__init__.py +++ b/src/modules/flavor/__init__.py @@ -24,14 +24,7 @@ # Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. 
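For orientation, a rough sketch of how the two layouts above map a hash to a relative path, and how a writable FileManager uses them. The cache directory, staged file, and hash value are invented; the import paths assume the pkg.file_layout.layout and pkg.file_layout.file_manager modules seen in this diff.

from pkg.file_layout.layout import V0Layout, V1Layout
from pkg.file_layout.file_manager import FileManager

h = "87ad66eb10a0e2e13951d2b0b3afbad7ffbdd5c2"
V0Layout().lookup(h)   # '87/ad66eb/87ad66eb...' - 256-way plus 16.7M-way fanout
V1Layout().lookup(h)   # '87/87ad66eb...'        - single 256-way fanout

fm = FileManager("/var/tmp/pkg-cache", readonly=False)  # V1 preferred, V0 legacy
fm.insert(h, "/tmp/staged-file")   # renames the staged file into the V1 location
fm.lookup(h)                       # returns the path; a copy found in the old V0
                                   # location would be migrated to V1 as a side effect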
# -__all__ = [ - "base" - "elf" - "hardlink" - "pound_bang" - "python" - "smf_manifest" -] +__all__ = ["base" "elf" "hardlink" "pound_bang" "python" "smf_manifest"] # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/flavor/base.py b/src/modules/flavor/base.py index f9137496a..16a891525 100644 --- a/src/modules/flavor/base.py +++ b/src/modules/flavor/base.py @@ -32,410 +32,444 @@ from pkg.portable import PD_DEFAULT_RUNPATH + class DependencyAnalysisError(Exception): - pass + pass class MissingFile(DependencyAnalysisError): - """Exception that is raised when a dependency checker can't find the - file provided.""" - - def __init__(self, file_path, dirs=None, hash=None): - Exception.__init__(self) - self.file_path = file_path - self.dirs = dirs - self.hash = hash - - def __str__(self): - if not self.dirs: - return _("Couldn't find '{0}'").format(self.file_path) - elif self.hash != "NOHASH": - return _("Couldn't find '{hash}' needed for '{path}'" - " in any of the specified search directories:\n" - "{dirs}").format(hash=self.hash, - path=self.file_path, dirs="\n".join( - ["\t" + d for d in sorted(self.dirs)])) - else: - return _("Couldn't find '{path}' in any of the " - "specified search directories:\n{dirs}").format( - path=self.file_path, - dirs="\n".join( - ["\t" + d for d in sorted(self.dirs)])) + """Exception that is raised when a dependency checker can't find the + file provided.""" + + def __init__(self, file_path, dirs=None, hash=None): + Exception.__init__(self) + self.file_path = file_path + self.dirs = dirs + self.hash = hash + + def __str__(self): + if not self.dirs: + return _("Couldn't find '{0}'").format(self.file_path) + elif self.hash != "NOHASH": + return _( + "Couldn't find '{hash}' needed for '{path}'" + " in any of the specified search directories:\n" + "{dirs}" + ).format( + hash=self.hash, + path=self.file_path, + dirs="\n".join(["\t" + d for d in sorted(self.dirs)]), + ) + else: + return _( + "Couldn't find '{path}' in any of the " + "specified search directories:\n{dirs}" + ).format( + path=self.file_path, + dirs="\n".join(["\t" + d for d in sorted(self.dirs)]), + ) + class MultipleDefaultRunpaths(DependencyAnalysisError): - """Exception that is raised when multiple $PGKDEPEND_RUNPATH tokens - are found in a pkg.depend.runpath attribute value.""" + """Exception that is raised when multiple $PGKDEPEND_RUNPATH tokens + are found in a pkg.depend.runpath attribute value.""" - def __init__(self): - Exception.__init__(self) + def __init__(self): + Exception.__init__(self) + + def __str__(self): + return _( + "More than one $PKGDEPEND_RUNPATH token was set on the " + "same action in this manifest." 
+ ) - def __str__(self): - return _( - "More than one $PKGDEPEND_RUNPATH token was set on the " - "same action in this manifest.") class InvalidDependBypassValue(DependencyAnalysisError): - """Exception that is raised when we encounter an incorrect - pkg.depend.bypass-generate attribute value.""" + """Exception that is raised when we encounter an incorrect + pkg.depend.bypass-generate attribute value.""" - def __init__(self, value, error): - self.value = value - self.error = error - Exception.__init__(self) + def __init__(self, value, error): + self.value = value + self.error = error + Exception.__init__(self) - def __str__(self): - return _( - "Invalid pkg.depend.bypass-generate value {val}: " - "{err}").format(val=self.value, err=self.error) + def __str__(self): + return _( + "Invalid pkg.depend.bypass-generate value {val}: " "{err}" + ).format(val=self.value, err=self.error) class InvalidPublishingDependency(DependencyAnalysisError): - """Exception that is raised when base_names or run_paths as well as - full_paths are specified for a PublishingDependency.""" + """Exception that is raised when base_names or run_paths as well as + full_paths are specified for a PublishingDependency.""" - def __init__(self, error): - self.error = error - Exception.__init__(self) + def __init__(self, error): + self.error = error + Exception.__init__(self) - def __str__(self): - return _( - "Invalid publishing dependency: {0}").format(self.error) + def __str__(self): + return _("Invalid publishing dependency: {0}").format(self.error) @total_ordering class Dependency(depend.DependencyAction): - """Base, abstract class to represent the dependencies a dependency - generator can produce.""" - - ERROR = 0 - WARNING = 1 - - DUMMY_FMRI = "__TBD" - DEPEND_DEBUG_PREFIX = "pkg.debug.depend" - DEPEND_TYPE = "require" - - def __init__(self, action, pkg_vars, proto_dir, attrs): - """Each dependency needs to know the action that generated it - and the variants for the package containing that action. - - 'action' is the action which produced this dependency. - - 'pkg_vars' is the list of variants against which the package - delivering the action was published. - - 'proto_dir' is the proto area where the file the action delivers - lives. - - 'attrs' is a dictionary to containing the relevant action tags - for the dependency. - """ - self.action = action - self.pkg_vars = pkg_vars - self.proto_dir = proto_dir - self.dep_vars = self.get_variant_combinations() - - attrs.update([ - ("fmri", self.DUMMY_FMRI), - ("type", self.DEPEND_TYPE), - ("{0}.reason".format(self.DEPEND_DEBUG_PREFIX), - self.action_path()) - ]) - - attrs.update(action.get_variant_template()) - # Only lists are permitted for multi-value action attributes. - for k, v in attrs.items(): - if isinstance(v, set): - attrs[k] = list(v) - - depend.DependencyAction.__init__(self, **attrs) + """Base, abstract class to represent the dependencies a dependency + generator can produce.""" - def is_error(self): - """Return true if failing to resolve this external dependency - should be considered an error.""" + ERROR = 0 + WARNING = 1 - return True + DUMMY_FMRI = "__TBD" + DEPEND_DEBUG_PREFIX = "pkg.debug.depend" + DEPEND_TYPE = "require" - def dep_key(self): - """Return a representation of the location the action depends - on in a way that is hashable.""" + def __init__(self, action, pkg_vars, proto_dir, attrs): + """Each dependency needs to know the action that generated it + and the variants for the package containing that action. 
- raise NotImplementedError(_("Subclasses of Dependency must " - "implement dep_key. Current class is {0}").format( - self.__class__.__name__)) + 'action' is the action which produced this dependency. - def get_variant_combinations(self, satisfied=False): - """Create the combinations of variants that this action - satisfies or needs satisfied. + 'pkg_vars' is the list of variants against which the package + delivering the action was published. - 'satisfied' determines whether the combination produced is - satisfied or unsatisfied.""" + 'proto_dir' is the proto area where the file the action delivers + lives. - variants = self.action.get_variant_template() - variants.merge_unknown(self.pkg_vars) - return variant.VariantCombinations(variants, - satisfied=satisfied) - - def action_path(self): - """Return the path to the file that generated this dependency. - """ - - return self.action.attrs["path"] - - def key(self): - """Keys for ordering two Dependency objects. Use ComparableMinxin - to do the rich comparison.""" - return (self.dep_key(), self.action_path(), - self.__class__.__name__) - - def __eq__(self, other): - return self.key() == other.key() - - def __lt__(self, other): - return self.key() < other.key() - - def __hash__(self): - return hash(self.key()) - - def get_vars_str(self): - """Produce a string representation of the variants that apply - to the dependency.""" - - if self.dep_vars is not None: - return " " + " ".join([ - ("{0}={1}".format(k, ",".join(self.dep_vars[k]))) - for k in sorted(self.dep_vars.keys()) - ]) - - return "" - - @staticmethod - def make_relative(path, dir): - """If 'path' is an absolute path, make it relative to the - directory path given, otherwise, make it relative to root.""" - if path.startswith(dir): - path = path[len(dir):] - return path.lstrip("/") + 'attrs' is a dictionary to containing the relevant action tags + for the dependency. + """ + self.action = action + self.pkg_vars = pkg_vars + self.proto_dir = proto_dir + self.dep_vars = self.get_variant_combinations() + + attrs.update( + [ + ("fmri", self.DUMMY_FMRI), + ("type", self.DEPEND_TYPE), + ( + "{0}.reason".format(self.DEPEND_DEBUG_PREFIX), + self.action_path(), + ), + ] + ) + + attrs.update(action.get_variant_template()) + # Only lists are permitted for multi-value action attributes. + for k, v in attrs.items(): + if isinstance(v, set): + attrs[k] = list(v) + + depend.DependencyAction.__init__(self, **attrs) + + def is_error(self): + """Return true if failing to resolve this external dependency + should be considered an error.""" + + return True + + def dep_key(self): + """Return a representation of the location the action depends + on in a way that is hashable.""" + + raise NotImplementedError( + _( + "Subclasses of Dependency must " + "implement dep_key. Current class is {0}" + ).format(self.__class__.__name__) + ) + + def get_variant_combinations(self, satisfied=False): + """Create the combinations of variants that this action + satisfies or needs satisfied. + + 'satisfied' determines whether the combination produced is + satisfied or unsatisfied.""" + + variants = self.action.get_variant_template() + variants.merge_unknown(self.pkg_vars) + return variant.VariantCombinations(variants, satisfied=satisfied) + + def action_path(self): + """Return the path to the file that generated this dependency.""" + + return self.action.attrs["path"] + + def key(self): + """Keys for ordering two Dependency objects. 
Use ComparableMinxin + to do the rich comparison.""" + return (self.dep_key(), self.action_path(), self.__class__.__name__) + + def __eq__(self, other): + return self.key() == other.key() + + def __lt__(self, other): + return self.key() < other.key() + + def __hash__(self): + return hash(self.key()) + + def get_vars_str(self): + """Produce a string representation of the variants that apply + to the dependency.""" + + if self.dep_vars is not None: + return " " + " ".join( + [ + ("{0}={1}".format(k, ",".join(self.dep_vars[k]))) + for k in sorted(self.dep_vars.keys()) + ] + ) + + return "" + + @staticmethod + def make_relative(path, dir): + """If 'path' is an absolute path, make it relative to the + directory path given, otherwise, make it relative to root.""" + if path.startswith(dir): + path = path[len(dir) :] + return path.lstrip("/") class PublishingDependency(Dependency): - """This class serves as a base for all dependencies. It handles - dependencies with multiple files, multiple paths, or both. - - File dependencies are stored either as a list of base_names and - a list of run_paths, or are expanded, and stored as a list of - full_paths to each file that could satisfy the dependency. + """This class serves as a base for all dependencies. It handles + dependencies with multiple files, multiple paths, or both. + + File dependencies are stored either as a list of base_names and + a list of run_paths, or are expanded, and stored as a list of + full_paths to each file that could satisfy the dependency. + """ + + def __init__( + self, + action, + base_names, + run_paths, + pkg_vars, + proto_dir, + kind, + full_paths=None, + ): + """Construct a PublishingDependency object. + + 'action' is the action which produced this dependency. + + 'base_names' is the list of files of the dependency. + + 'run_paths' is the list of directory paths to the file of the + dependency. + + 'pkg_vars' is the list of variants against which the package + delivering the action was published. + + 'proto_dir' is the proto area where the file the action delivers + lives. It may be None if the notion of a proto_dir is + meaningless for a particular PublishingDependency. + + 'kind' is the kind of dependency that this is. + + 'full_paths' if not None, is used instead of the combination of + 'base_names' and 'run_paths' when defining dependencies where + exact paths to files matter (for example, SMF dependencies which + are satisfied by more than one SMF manifest are not searched for + using the manifest base_name in a list of run_paths, unlike + python modules, which use $PYTHONPATH.) Specifying full_paths + as well as base_names/run_paths combinations is not allowed. """ - def __init__(self, action, base_names, run_paths, pkg_vars, proto_dir, - kind, full_paths=None): - """Construct a PublishingDependency object. - - 'action' is the action which produced this dependency. - - 'base_names' is the list of files of the dependency. - - 'run_paths' is the list of directory paths to the file of the - dependency. - - 'pkg_vars' is the list of variants against which the package - delivering the action was published. - - 'proto_dir' is the proto area where the file the action delivers - lives. It may be None if the notion of a proto_dir is - meaningless for a particular PublishingDependency. - - 'kind' is the kind of dependency that this is. 
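A small, hypothetical example of the make_relative() helper above; the proto-area prefix and file name are invented, and the import path assumes the module shown in this diff (src/modules/flavor/base.py, i.e. pkg.flavor.base).

from pkg.flavor.base import Dependency

Dependency.make_relative("proto/root/usr/lib/libfoo.so.1", "proto/root")
# -> 'usr/lib/libfoo.so.1'  (the proto area prefix is stripped)
Dependency.make_relative("/usr/lib/libfoo.so.1", "proto/root")
# -> 'usr/lib/libfoo.so.1'  (no prefix match; only the leading '/' is removed)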
- - 'full_paths' if not None, is used instead of the combination of - 'base_names' and 'run_paths' when defining dependencies where - exact paths to files matter (for example, SMF dependencies which - are satisfied by more than one SMF manifest are not searched for - using the manifest base_name in a list of run_paths, unlike - python modules, which use $PYTHONPATH.) Specifying full_paths - as well as base_names/run_paths combinations is not allowed. - """ - - if full_paths and (base_names or run_paths): - # this should never happen, as consumers should always - # construct PublishingDependency objects using either - # full_paths or a combination of base_names and - # run_paths. - raise InvalidPublishingDependency( - "A dependency was specified using full_paths={0} as " - "well as base_names={1} and run_paths={2}".format( - full_paths, base_names, run_paths)) - - self.base_names = sorted(base_names) - - if full_paths == None: - self.full_paths = [] - else: - self.full_paths = full_paths - - if proto_dir is None: - self.run_paths = sorted(run_paths) - # proto_dir is set to "" so that the proto_dir can be - # joined unconditionally with other paths. This makes - # the code path in _check_path simpler. - proto_dir = "" - else: - self.run_paths = sorted([ - self.make_relative(rp, proto_dir) - for rp in run_paths - ]) - - attrs = {"{0}.type".format(self.DEPEND_DEBUG_PREFIX): kind} - if self.full_paths: - attrs["{0}.fullpath".format(self.DEPEND_DEBUG_PREFIX)] = \ - self.full_paths - else: - attrs.update({ - "{0}.file".format( - self.DEPEND_DEBUG_PREFIX): self.base_names, - "{0}.path".format( - self.DEPEND_DEBUG_PREFIX): self.run_paths, - }) - - Dependency.__init__(self, action, pkg_vars, proto_dir, attrs) - - def dep_key(self): - """Return the a value that represents the path of the - dependency. It must be hashable.""" - if self.full_paths: - return (tuple(self.full_paths)) - else: - return (tuple(self.base_names), tuple(self.run_paths)) - - def _check_path(self, path_to_check, delivered_files): - """Takes a dictionary of files that are known to exist, and - returns the path to the file that satisfies this dependency, or - None if no such delivered file exists.""" - - # Using normpath and realpath are ok here because the dependency - # is being checked against the files, directories, and links - # delivered in the proto area. - if path_to_check in delivered_files: - return path_to_check - norm_path = os.path.normpath(os.path.join(self.proto_dir, - path_to_check)) - if norm_path in delivered_files: - return norm_path - - real_path = os.path.realpath(norm_path) - if real_path in delivered_files: - return real_path - - return None - - def possibly_delivered(self, delivered_files, links, resolve_links, - orig_dep_vars): - """Finds a list of files which satisfy this dependency, and the - variants under which each file satisfies it. It takes into - account links and hardlinks. - - 'delivered_files' is a dictionary which maps paths to the - packages that deliver the path and the variants under which the - path is present. - - 'links' is an Entries namedtuple which contains two - dictionaries. One dictionary maps package identity to the links - that it delivers. The other dictionary, in this case, should be - empty. - - 'resolve_links' is a function which finds the real paths that a - path can resolve into, given a set of known links. - - 'orig_dep_vars' is the set of variants under which this - dependency exists.""" - - res = [] - # A dependency may be built using this dictionary of attributes. 
- # Seeding it with the type is necessary to create a Dependency - # object. - attrs = { - "type":"require" + if full_paths and (base_names or run_paths): + # this should never happen, as consumers should always + # construct PublishingDependency objects using either + # full_paths or a combination of base_names and + # run_paths. + raise InvalidPublishingDependency( + "A dependency was specified using full_paths={0} as " + "well as base_names={1} and run_paths={2}".format( + full_paths, base_names, run_paths + ) + ) + + self.base_names = sorted(base_names) + + if full_paths == None: + self.full_paths = [] + else: + self.full_paths = full_paths + + if proto_dir is None: + self.run_paths = sorted(run_paths) + # proto_dir is set to "" so that the proto_dir can be + # joined unconditionally with other paths. This makes + # the code path in _check_path simpler. + proto_dir = "" + else: + self.run_paths = sorted( + [self.make_relative(rp, proto_dir) for rp in run_paths] + ) + + attrs = {"{0}.type".format(self.DEPEND_DEBUG_PREFIX): kind} + if self.full_paths: + attrs[ + "{0}.fullpath".format(self.DEPEND_DEBUG_PREFIX) + ] = self.full_paths + else: + attrs.update( + { + "{0}.file".format( + self.DEPEND_DEBUG_PREFIX + ): self.base_names, + "{0}.path".format(self.DEPEND_DEBUG_PREFIX): self.run_paths, } - def process_path(path_to_check): - res = [] - # Find the potential real paths that path_to_check could - # resolve to. - res_pths, res_links = resolve_links( - path_to_check, delivered_files, links, - orig_dep_vars, attrs) - for res_pth, res_pfmri, nearest_fmri, res_vc, \ - res_via_links in res_pths: - p = self._check_path(res_pth, delivered_files) - if p: - res.append((p, res_vc)) - return res - - # if this is an expanded dependency, we iterate over the list of - # full paths - if self.full_paths: - for path_to_check in self.full_paths: - res.extend(process_path(path_to_check)) - - # otherwise, it's a dependency with run_path and base_names - # entries - else: - for bn in self.base_names: - for rp in self.run_paths: - path_to_check = os.path.normpath( - os.path.join(rp, bn)) - res.extend(process_path(path_to_check)) - return res - - def resolve_internal(self, delivered_files, links, resolve_links, *args, - **kwargs): - """Determines whether this dependency (self) can be satisfied by - the other items in the package which delivers it. A tuple of - two values is produced. The first is either None, meaning the - dependency was satisfied, or self.ERROR, meaning the dependency - wasn't totally satisfied by the delivered files. The second - value is the set of variants for which the dependency isn't - satisfied. - - 'delivered_files' is a dictionary which maps package identity - to the files the package delivers. - - 'links' is an Entries namedtuple which contains two - dictionaries. One dictionary maps package identity to the links - that it delivers. The other dictionary, in this case, should be - empty. - - 'resolve_links' is a function which finds the real paths a path - can resolve into given a set of known links. - - '*args' and '**kwargs' are used because subclasses may need - more information for their implementations. 
See pkg.flavor.elf - for an example of this.""" - - missing_vars = self.get_variant_combinations() - orig_dep_vars = self.get_variant_combinations() - for p, vc in self.possibly_delivered(delivered_files, links, - resolve_links, orig_dep_vars): - missing_vars.mark_as_satisfied(vc) - if missing_vars.is_satisfied(): - return None, missing_vars - return self.ERROR, missing_vars + ) + + Dependency.__init__(self, action, pkg_vars, proto_dir, attrs) + + def dep_key(self): + """Return the a value that represents the path of the + dependency. It must be hashable.""" + if self.full_paths: + return tuple(self.full_paths) + else: + return (tuple(self.base_names), tuple(self.run_paths)) + + def _check_path(self, path_to_check, delivered_files): + """Takes a dictionary of files that are known to exist, and + returns the path to the file that satisfies this dependency, or + None if no such delivered file exists.""" + + # Using normpath and realpath are ok here because the dependency + # is being checked against the files, directories, and links + # delivered in the proto area. + if path_to_check in delivered_files: + return path_to_check + norm_path = os.path.normpath( + os.path.join(self.proto_dir, path_to_check) + ) + if norm_path in delivered_files: + return norm_path + + real_path = os.path.realpath(norm_path) + if real_path in delivered_files: + return real_path + + return None + + def possibly_delivered( + self, delivered_files, links, resolve_links, orig_dep_vars + ): + """Finds a list of files which satisfy this dependency, and the + variants under which each file satisfies it. It takes into + account links and hardlinks. + + 'delivered_files' is a dictionary which maps paths to the + packages that deliver the path and the variants under which the + path is present. + + 'links' is an Entries namedtuple which contains two + dictionaries. One dictionary maps package identity to the links + that it delivers. The other dictionary, in this case, should be + empty. + + 'resolve_links' is a function which finds the real paths that a + path can resolve into, given a set of known links. + + 'orig_dep_vars' is the set of variants under which this + dependency exists.""" + + res = [] + # A dependency may be built using this dictionary of attributes. + # Seeding it with the type is necessary to create a Dependency + # object. + attrs = {"type": "require"} + + def process_path(path_to_check): + res = [] + # Find the potential real paths that path_to_check could + # resolve to. + res_pths, res_links = resolve_links( + path_to_check, delivered_files, links, orig_dep_vars, attrs + ) + for ( + res_pth, + res_pfmri, + nearest_fmri, + res_vc, + res_via_links, + ) in res_pths: + p = self._check_path(res_pth, delivered_files) + if p: + res.append((p, res_vc)) + return res + + # if this is an expanded dependency, we iterate over the list of + # full paths + if self.full_paths: + for path_to_check in self.full_paths: + res.extend(process_path(path_to_check)) + + # otherwise, it's a dependency with run_path and base_names + # entries + else: + for bn in self.base_names: + for rp in self.run_paths: + path_to_check = os.path.normpath(os.path.join(rp, bn)) + res.extend(process_path(path_to_check)) + return res + + def resolve_internal( + self, delivered_files, links, resolve_links, *args, **kwargs + ): + """Determines whether this dependency (self) can be satisfied by + the other items in the package which delivers it. A tuple of + two values is produced. 
The first is either None, meaning the + dependency was satisfied, or self.ERROR, meaning the dependency + wasn't totally satisfied by the delivered files. The second + value is the set of variants for which the dependency isn't + satisfied. + + 'delivered_files' is a dictionary which maps package identity + to the files the package delivers. + + 'links' is an Entries namedtuple which contains two + dictionaries. One dictionary maps package identity to the links + that it delivers. The other dictionary, in this case, should be + empty. + + 'resolve_links' is a function which finds the real paths a path + can resolve into given a set of known links. + + '*args' and '**kwargs' are used because subclasses may need + more information for their implementations. See pkg.flavor.elf + for an example of this.""" + + missing_vars = self.get_variant_combinations() + orig_dep_vars = self.get_variant_combinations() + for p, vc in self.possibly_delivered( + delivered_files, links, resolve_links, orig_dep_vars + ): + missing_vars.mark_as_satisfied(vc) + if missing_vars.is_satisfied(): + return None, missing_vars + return self.ERROR, missing_vars def insert_default_runpath(default_runpath, run_paths): - """Insert our default search path where the PD_DEFAULT_PATH token was - found, returning an updated list of run paths.""" - try: - new_paths = run_paths - index = run_paths.index(PD_DEFAULT_RUNPATH) - new_paths = run_paths[:index] + \ - default_runpath + run_paths[index + 1:] - if PD_DEFAULT_RUNPATH in new_paths: - raise MultipleDefaultRunpaths() - return new_paths - - except ValueError: - # no PD_DEFAULT_PATH token, so we override the - # whole default search path - return run_paths + """Insert our default search path where the PD_DEFAULT_PATH token was + found, returning an updated list of run paths.""" + try: + new_paths = run_paths + index = run_paths.index(PD_DEFAULT_RUNPATH) + new_paths = run_paths[:index] + default_runpath + run_paths[index + 1 :] + if PD_DEFAULT_RUNPATH in new_paths: + raise MultipleDefaultRunpaths() + return new_paths + + except ValueError: + # no PD_DEFAULT_PATH token, so we override the + # whole default search path + return run_paths + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/modules/flavor/depthlimitedmf.py b/src/modules/flavor/depthlimitedmf.py index 8895646d3..92f64836f 100644 --- a/src/modules/flavor/depthlimitedmf.py +++ b/src/modules/flavor/depthlimitedmf.py @@ -23,13 +23,14 @@ # Python 2 and Python 3 syntax. if __name__ != "__main__": - import pkg.flavor.base as base + import pkg.flavor.base as base import modulefinder import os import sys + if sys.version_info[0] == 3: - from importlib.machinery import EXTENSION_SUFFIXES + from importlib.machinery import EXTENSION_SUFFIXES # A string used as a component of the pkg.depend.runpath value as a special # token to determine where to insert the runpath that pkgdepend generates itself @@ -38,377 +39,382 @@ python_path = "PYTHONPATH" + class ModuleInfo(object): - """This class contains information about from where a python module - might be loaded.""" + """This class contains information about from where a python module + might be loaded.""" - def __init__(self, name, dirs, builtin=False): - """Build a ModuleInfo object. + def __init__(self, name, dirs, builtin=False): + """Build a ModuleInfo object. - The 'name' parameter is the name of the module. + The 'name' parameter is the name of the module. 
- The 'dirs' parameter is the list of directories where the module - might be found. + The 'dirs' parameter is the list of directories where the module + might be found. - The 'builtin' parameter sets whether the module is a python - builtin (like sys).""" + The 'builtin' parameter sets whether the module is a python + builtin (like sys).""" - self.name = name - self.builtin = builtin - self.patterns = [ "{0}.py", "{0}.pyc", "{0}.pyo", "{0}/__init__.py" ] - if sys.version_info[0] == 2: - self.patterns += [ - "{0}.so", "{0}module.so", "64/{0}.so", "64/{0}module.so" - ] - else: - self.patterns += \ - ["{{0}}{0}".format(s) for s in EXTENSION_SUFFIXES] + \ - ["64/{{0}}{0}".format(s) for s in EXTENSION_SUFFIXES] - self.dirs = sorted(dirs) + self.name = name + self.builtin = builtin + self.patterns = ["{0}.py", "{0}.pyc", "{0}.pyo", "{0}/__init__.py"] + if sys.version_info[0] == 2: + self.patterns += [ + "{0}.so", + "{0}module.so", + "64/{0}.so", + "64/{0}module.so", + ] + else: + self.patterns += [ + "{{0}}{0}".format(s) for s in EXTENSION_SUFFIXES + ] + ["64/{{0}}{0}".format(s) for s in EXTENSION_SUFFIXES] + self.dirs = sorted(dirs) - def make_package(self): - """Declare that this module is a package.""" + def make_package(self): + """Declare that this module is a package.""" - if self.dirs: - self.patterns = ["{0}/__init__.py"] - else: - self.patterns = [] + if self.dirs: + self.patterns = ["{0}/__init__.py"] + else: + self.patterns = [] - def get_package_dirs(self): - """Get the directories where this package might be defined.""" + def get_package_dirs(self): + """Get the directories where this package might be defined.""" - return [os.path.join(p, self.name) for p in self.dirs] + return [os.path.join(p, self.name) for p in self.dirs] - def get_file_names(self): - """Return all the file names under which this module might be - found.""" + def get_file_names(self): + """Return all the file names under which this module might be + found.""" - return [ pat.format(self.name) for pat in self.patterns ] + return [pat.format(self.name) for pat in self.patterns] - def __str__(self): - return "name:{0} suffixes:{1} dirs:{2}".format(self.name, - " ".join(self.patterns), len(self.dirs)) + def __str__(self): + return "name:{0} suffixes:{1} dirs:{2}".format( + self.name, " ".join(self.patterns), len(self.dirs) + ) if __name__ == "__main__": - try: - import pkg.misc as misc - import gettext - import locale - misc.setlocale(locale.LC_ALL, "") - gettext.install("pkg", "/usr/share/locale") - except ImportError: - pass + try: + import pkg.misc as misc + import gettext + import locale - class MultipleDefaultRunPaths(Exception): + misc.setlocale(locale.LC_ALL, "") + gettext.install("pkg", "/usr/share/locale") + except ImportError: + pass - def __str__(self): - return _( - "More than one $PKGDEPEND_RUNPATH token was set on " - "the same action in this manifest.") + class MultipleDefaultRunPaths(Exception): + def __str__(self): + return _( + "More than one $PKGDEPEND_RUNPATH token was set on " + "the same action in this manifest." + ) class DepthLimitedModuleFinder(modulefinder.ModuleFinder): - - def __init__(self, install_dir, *args, **kwargs): - """Produce a module finder that ignores PYTHONPATH and only - reports the direct imports of a module. - - run_paths as a keyword argument specifies a list of additional - paths to use when searching for modules.""" - - # ModuleFinder.__init__ doesn't expect run_paths - run_paths = kwargs.pop("run_paths", []) - - # Check to see whether a python path has been set. 
- if python_path in os.environ: - py_path = [ - os.path.normpath(fp) - for fp in os.environ[python_path].split(os.pathsep) - ] - else: - py_path = [] - - # Remove any paths that start with the defined python paths. - new_path = [ - fp - for fp in sys.path[1:] - if not self.startswith_path(fp, py_path) - ] - new_path.append(install_dir) - - if run_paths: - if __name__ != "__main__": - # add our detected runpath into the - # user-supplied one (if any) - new_path = base.insert_default_runpath(new_path, - run_paths) - else: - # This is a copy of the above function call. - # insert our default search path where the - # PD_DEFAULT_RUNPATH token was found - try: - index = run_paths.index( - PD_DEFAULT_RUNPATH) - run_paths = run_paths[:index] + \ - new_path + run_paths[index + 1:] - if PD_DEFAULT_RUNPATH in run_paths: - raise MultipleDefaultRunPaths() - except ValueError: - # no PD_DEFAULT_PATH token, so we - # override the whole default search path - pass - new_path = run_paths - - modulefinder.ModuleFinder.__init__(self, path=new_path, - *args, **kwargs) - - @staticmethod - def startswith_path(path, lst): - for l in lst: - if path.startswith(l): - return True - return False - - def run_script(self, pathname): - """Find all the modules the module at pathname directly - imports.""" - - fp = open(pathname, "r") - return self.load_module('__main__', fp, pathname) - - def load_module(self, fqname, fp, pathname): - """This code has been slightly modified from the function of - the parent class. Specifically, it checks the current depth - of the loading and halts if it exceeds the depth that was given - to run_script.""" - - self.msgin(2, "load_module", fqname, fp and "fp", pathname) - co = compile(fp.read()+'\n', pathname, 'exec') - m = self.add_module(fqname) - m.__file__ = pathname - res = [] - if co: - if self.replace_paths: - co = self.replace_paths_in_code(co) - m.__code__ = co - try: - res.extend(self.scan_code(co, m)) - except ImportError as msg: - self.msg(2, "ImportError:", str(msg), fqname, - pathname) - self._add_badmodule(fqname, m) - - self.msgout(2, "load_module ->", m) - return res - - def scan_code(self, co, m): - """Scan the code looking for import statements.""" - - res = [] - code = co.co_code - if sys.version_info >= (2, 5) and sys.version_info < (3, 6): - # Python 3.6's modulefinder.py got rid of - # scan_opcodes_25() and renamed scan_opcodes_25() - # to scan_opcodes(). Previously old scan_opcodes() - # was for Python 2.4 and earlier. - scanner = self.scan_opcodes_25 - else: - scanner = self.scan_opcodes - for what, args in scanner(co): - if what == "store": - name, = args - m.globalnames[name] = 1 - elif what in ("import", "absolute_import"): - fromlist, name = args - have_star = 0 - if fromlist is not None: - if "*" in fromlist: - have_star = 1 - fromlist = [ - f for f in fromlist if f != "*" - ] - if what == "absolute_import": - level = 0 - else: - level = -1 - res.extend(self._safe_import_hook(name, m, - fromlist, level=level)) - elif what == "relative_import": - level, fromlist, name = args - if name: - res.extend(self._safe_import_hook(name, - m, fromlist, level=level)) - else: - parent = self.determine_parent(m, - level=level) - res.extend(self._safe_import_hook( - parent.__name__, None, fromlist, - level=0)) - else: - # We don't expect anything else from the - # generator. 
- raise RuntimeError(what) - - for c in co.co_consts: - if isinstance(c, type(co)): - res.extend(self.scan_code(c, m)) - return res - - - def _safe_import_hook(self, name, caller, fromlist, level=-1): - """Wrapper for self.import_hook() that won't raise ImportError. - """ - - res = [] - if name in self.badmodules: - self._add_badmodule(name, caller) - return [] + def __init__(self, install_dir, *args, **kwargs): + """Produce a module finder that ignores PYTHONPATH and only + reports the direct imports of a module. + + run_paths as a keyword argument specifies a list of additional + paths to use when searching for modules.""" + + # ModuleFinder.__init__ doesn't expect run_paths + run_paths = kwargs.pop("run_paths", []) + + # Check to see whether a python path has been set. + if python_path in os.environ: + py_path = [ + os.path.normpath(fp) + for fp in os.environ[python_path].split(os.pathsep) + ] + else: + py_path = [] + + # Remove any paths that start with the defined python paths. + new_path = [ + fp for fp in sys.path[1:] if not self.startswith_path(fp, py_path) + ] + new_path.append(install_dir) + + if run_paths: + if __name__ != "__main__": + # add our detected runpath into the + # user-supplied one (if any) + new_path = base.insert_default_runpath(new_path, run_paths) + else: + # This is a copy of the above function call. + # insert our default search path where the + # PD_DEFAULT_RUNPATH token was found try: - res.extend(self.import_hook(name, caller, level=level)) - except ImportError as msg: - self.msg(2, "ImportError:", str(msg)) - self._add_badmodule(name, caller) + index = run_paths.index(PD_DEFAULT_RUNPATH) + run_paths = ( + run_paths[:index] + new_path + run_paths[index + 1 :] + ) + if PD_DEFAULT_RUNPATH in run_paths: + raise MultipleDefaultRunPaths() + except ValueError: + # no PD_DEFAULT_PATH token, so we + # override the whole default search path + pass + new_path = run_paths + + modulefinder.ModuleFinder.__init__(self, path=new_path, *args, **kwargs) + + @staticmethod + def startswith_path(path, lst): + for l in lst: + if path.startswith(l): + return True + return False + + def run_script(self, pathname): + """Find all the modules the module at pathname directly + imports.""" + + fp = open(pathname, "r") + return self.load_module("__main__", fp, pathname) + + def load_module(self, fqname, fp, pathname): + """This code has been slightly modified from the function of + the parent class. Specifically, it checks the current depth + of the loading and halts if it exceeds the depth that was given + to run_script.""" + + self.msgin(2, "load_module", fqname, fp and "fp", pathname) + co = compile(fp.read() + "\n", pathname, "exec") + m = self.add_module(fqname) + m.__file__ = pathname + res = [] + if co: + if self.replace_paths: + co = self.replace_paths_in_code(co) + m.__code__ = co + try: + res.extend(self.scan_code(co, m)) + except ImportError as msg: + self.msg(2, "ImportError:", str(msg), fqname, pathname) + self._add_badmodule(fqname, m) + + self.msgout(2, "load_module ->", m) + return res + + def scan_code(self, co, m): + """Scan the code looking for import statements.""" + + res = [] + code = co.co_code + if sys.version_info >= (2, 5) and sys.version_info < (3, 6): + # Python 3.6's modulefinder.py got rid of + # scan_opcodes_25() and renamed scan_opcodes_25() + # to scan_opcodes(). Previously old scan_opcodes() + # was for Python 2.4 and earlier. 
+ scanner = self.scan_opcodes_25 + else: + scanner = self.scan_opcodes + for what, args in scanner(co): + if what == "store": + (name,) = args + m.globalnames[name] = 1 + elif what in ("import", "absolute_import"): + fromlist, name = args + have_star = 0 + if fromlist is not None: + if "*" in fromlist: + have_star = 1 + fromlist = [f for f in fromlist if f != "*"] + if what == "absolute_import": + level = 0 else: - if fromlist: - for sub in fromlist: - if sub in self.badmodules: - self._add_badmodule(sub, caller) - continue - res.extend(self.import_hook(name, - caller, [sub], level=level)) - return res - - def import_hook(self, name, caller=None, fromlist=None, level=-1): - """Find all the modules that importing name will import.""" - - # Special handling for os.path is needed because the os module - # manipulates sys.modules directly to provide both os and - # os.path. - if name == "os.path": - self.msg(2, "bypassing os.path import - importing os " - "instead", name, caller, fromlist, level) - name = "os" - - self.msg(3, "import_hook", name, caller, fromlist, level) - parent = self.determine_parent(caller, level=level) - q, tail = self.find_head_package(parent, name) - if not tail: - # If q is a builtin module, don't report it because it - # doesn't live in the normal module space and it's part - # of python itself, which is handled by a different - # kind of dependency. - if isinstance(q, ModuleInfo) and q.builtin: - return [] - elif isinstance(q, modulefinder.Module): - name = q.__name__ - path = q.__path__ - # some Module objects don't get a path - if not path: - if name in sys.builtin_module_names or \ - name == "__future__": - return [ModuleInfo(name, [], - builtin=True)] - else: - return [ModuleInfo(name, [])] - return [ModuleInfo(name, path)] - else: - return [q] - res = self.load_tail(q, tail) - q.make_package() - res.append(q) - return res - - def import_module(self, partname, fqname, parent): - """Find where this module lives relative to its parent.""" - - parent_dirs = None - self.msgin(3, "import_module", partname, fqname, parent) - try: - m = self.modules[fqname] - except KeyError: - pass + level = -1 + res.extend( + self._safe_import_hook(name, m, fromlist, level=level) + ) + elif what == "relative_import": + level, fromlist, name = args + if name: + res.extend( + self._safe_import_hook(name, m, fromlist, level=level) + ) else: - self.msgout(3, "import_module ->", m) - return m - if fqname in self.badmodules: - self.msgout(3, "import_module -> None") - return None - if parent: - if not parent.dirs: - self.msgout(3, "import_module -> None") - return None - else: - parent_dirs = parent.get_package_dirs() - try: - mod = self.find_module(partname, parent_dirs, parent) - except ImportError: - self.msgout(3, "import_module ->", None) - return None - return mod - - def find_module(self, name, path, parent=None): - """Calculate the potential paths on the file system where the - module could be found.""" - + parent = self.determine_parent(m, level=level) + res.extend( + self._safe_import_hook( + parent.__name__, None, fromlist, level=0 + ) + ) + else: + # We don't expect anything else from the + # generator. 
+ raise RuntimeError(what) + + for c in co.co_consts: + if isinstance(c, type(co)): + res.extend(self.scan_code(c, m)) + return res + + def _safe_import_hook(self, name, caller, fromlist, level=-1): + """Wrapper for self.import_hook() that won't raise ImportError.""" + + res = [] + if name in self.badmodules: + self._add_badmodule(name, caller) + return [] + try: + res.extend(self.import_hook(name, caller, level=level)) + except ImportError as msg: + self.msg(2, "ImportError:", str(msg)) + self._add_badmodule(name, caller) + else: + if fromlist: + for sub in fromlist: + if sub in self.badmodules: + self._add_badmodule(sub, caller) + continue + res.extend( + self.import_hook(name, caller, [sub], level=level) + ) + return res + + def import_hook(self, name, caller=None, fromlist=None, level=-1): + """Find all the modules that importing name will import.""" + + # Special handling for os.path is needed because the os module + # manipulates sys.modules directly to provide both os and + # os.path. + if name == "os.path": + self.msg( + 2, + "bypassing os.path import - importing os " "instead", + name, + caller, + fromlist, + level, + ) + name = "os" + + self.msg(3, "import_hook", name, caller, fromlist, level) + parent = self.determine_parent(caller, level=level) + q, tail = self.find_head_package(parent, name) + if not tail: + # If q is a builtin module, don't report it because it + # doesn't live in the normal module space and it's part + # of python itself, which is handled by a different + # kind of dependency. + if isinstance(q, ModuleInfo) and q.builtin: + return [] + elif isinstance(q, modulefinder.Module): + name = q.__name__ + path = q.__path__ + # some Module objects don't get a path if not path: if name in sys.builtin_module_names or name == "__future__": - return ModuleInfo(name, [], builtin=True) - path = self.path - return ModuleInfo(name, path) - - def load_tail(self, q, tail): - """Determine where each component of a multilevel import would - be found on the file system.""" - - self.msgin(4, "load_tail", q, tail) - res = [] - name = q.name - cur_parent = q - while tail: - i = tail.find('.') - if i < 0: - i = len(tail) - head, tail = tail[:i], tail[i+1:] - new_name = "{0}.{1}".format(name, head) - r = self.import_module(head, new_name, cur_parent) - res.append(r) - name = new_name - cur_parent = r - - # All but the last module found must be packages because they - # contained other packages. 
- for i in range(0, len(res) - 1): - res[i].make_package() - - self.msgout(4, "load_tail ->", q) - return res + return [ModuleInfo(name, [], builtin=True)] + else: + return [ModuleInfo(name, [])] + return [ModuleInfo(name, path)] + else: + return [q] + res = self.load_tail(q, tail) + q.make_package() + res.append(q) + return res + + def import_module(self, partname, fqname, parent): + """Find where this module lives relative to its parent.""" + + parent_dirs = None + self.msgin(3, "import_module", partname, fqname, parent) + try: + m = self.modules[fqname] + except KeyError: + pass + else: + self.msgout(3, "import_module ->", m) + return m + if fqname in self.badmodules: + self.msgout(3, "import_module -> None") + return None + if parent: + if not parent.dirs: + self.msgout(3, "import_module -> None") + return None + else: + parent_dirs = parent.get_package_dirs() + try: + mod = self.find_module(partname, parent_dirs, parent) + except ImportError: + self.msgout(3, "import_module ->", None) + return None + return mod + + def find_module(self, name, path, parent=None): + """Calculate the potential paths on the file system where the + module could be found.""" + + if not path: + if name in sys.builtin_module_names or name == "__future__": + return ModuleInfo(name, [], builtin=True) + path = self.path + return ModuleInfo(name, path) + + def load_tail(self, q, tail): + """Determine where each component of a multilevel import would + be found on the file system.""" + + self.msgin(4, "load_tail", q, tail) + res = [] + name = q.name + cur_parent = q + while tail: + i = tail.find(".") + if i < 0: + i = len(tail) + head, tail = tail[:i], tail[i + 1 :] + new_name = "{0}.{1}".format(name, head) + r = self.import_module(head, new_name, cur_parent) + res.append(r) + name = new_name + cur_parent = r + + # All but the last module found must be packages because they + # contained other packages. + for i in range(0, len(res) - 1): + res[i].make_package() + + self.msgout(4, "load_tail ->", q) + return res if __name__ == "__main__": - """Usage: - depthlimitedmf.py
      -""".format(title, title, title) - - sel = "class=\"selected\"" - for r in map_regions: - msg += """
    • {3}
    • """.format(sel, title, r, r) - sel = "" - - msg += """\ +""".format( + title, title, title + ) + + sel = 'class="selected"' + for r in map_regions: + msg += """
    • {3}
    • """.format( + sel, title, r, r + ) + sel = "" + + msg += """\
    """ - for r in map_regions: - url = "chs=440x220&cht=t&chtm={0}&chld={1}&chd=t:{2}&chco=ffffff,b0d2ff,013476".format(r, chart_ccs, chart_data) - print("".format(url)) - fname = retrieve_chart("http://chart.apis.google.com/chart?{0}".format(url), - "{0}-{1}-map".format(title, r)) - msg += """
    {3}
    """.format(title, r, fname, title) - - msg += """\ + for r in map_regions: + url = "chs=440x220&cht=t&chtm={0}&chld={1}&chd=t:{2}&chco=ffffff,b0d2ff,013476".format( + r, chart_ccs, chart_data + ) + print("".format(url)) + fname = retrieve_chart( + "http://chart.apis.google.com/chart?{0}".format(url), + "{0}-{1}-map".format(title, r), + ) + msg += """
    {3}
    """.format( + title, r, fname, title + ) + + msg += """\
    Color intensity linear in log of requests.""" + print(msg) + if summary_file: + print(msg, file=summary_file) - print(msg) - if summary_file: - print(msg, file=summary_file) - -def report_by_raw_agent(data, title, summary_file = None): - rf = prefix_raw_open(title, "country") - for i, n in (sorted(data.items(), key=lambda k_v: (k_v[1], k_v[0]))): - print(i, n, file=rf) - rf.close() +def report_by_raw_agent(data, title, summary_file=None): + rf = prefix_raw_open(title, "country") + for i, n in sorted(data.items(), key=lambda k_v: (k_v[1], k_v[0])): + print(i, n, file=rf) + rf.close() # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/log-scripts/an_search.py b/src/util/log-scripts/an_search.py index b361b61e6..4cce1da8b 100644 --- a/src/util/log-scripts/an_search.py +++ b/src/util/log-scripts/an_search.py @@ -51,26 +51,35 @@ pkg_pat = re.compile(r"/search/(?P\d+)/(?P.*)") -def emit_search_report(summary_file, searchtype, label, results): - print("
    ")
    -        for i, n in results:
    -                print(i, n)
-        print("</pre>")
-        if summary_file:
-                print("""
+def emit_search_report(summary_file, searchtype, label, results):
+    print("<pre>")
    +    for i, n in results:
    +        print(i, n)
+    print("</pre>")
+
+    if summary_file:
+        print(
+            """

    Top 25 {searchtype} searches

    - """.format(label=label, searchtype=searchtype), file=summary_file) - - for i, n in results[:25]: - print("".format(i, n), - file=summary_file) - - print("
    Term{label}
    {0}{1}
    ", file=summary_file) - print(""" + """.format( + label=label, searchtype=searchtype + ), + file=summary_file, + ) + + for i, n in results[:25]: + print( + "{0}{1}".format(i, n), + file=summary_file, + ) + + print("", file=summary_file) + print( + """ - """.format(label=label, searchtype=searchtype), file=summary_file) - + """.format( + label=label, searchtype=searchtype + ), + file=summary_file, + ) def report_search_by_failure(): - sfi = sorted(search_by_failure.items(), reverse=True, key=lambda k_v: (k_v[1],k_v[0])) - emit_search_report(summary_file, "failed", "Misses", sfi) + sfi = sorted( + search_by_failure.items(), + reverse=True, + key=lambda k_v: (k_v[1], k_v[0]), + ) + emit_search_report(summary_file, "failed", "Misses", sfi) def report_search_by_success(): - ssi = sorted(search_by_success.items(), reverse=True, key=lambda k_v1: (k_v1[1],k_v1[0])) - emit_search_report(summary_file, "successful", "Hits", ssi) + ssi = sorted( + search_by_success.items(), + reverse=True, + key=lambda k_v1: (k_v1[1], k_v1[0]), + ) + emit_search_report(summary_file, "successful", "Hits", ssi) def count_search(mg, d): - try: - search_by_date[d.date().isoformat()] += 1 - except KeyError: - search_by_date[d.date().isoformat()] = 1 - try: - search_by_ip[mg["ip"]] += 1 - except KeyError: - search_by_ip[mg["ip"]] = 1 - - - pm = pkg_pat.search(mg["uri"]) - if pm != None: - pg = pm.groupdict() - - kw = unquote(pg["keywords"]) - - if mg["response"] == "200": - if mg["subcode"] == "-": - # A zero-length response is a failed search - # (4 Aug - ...). Consequence of the migration - # to CherryPy; will be unneeded once - # http://defect.opensolaris.org/bz/show_bug.cgi?id=3238 - # is fixed. - try: - search_by_failure[kw] += 1 - except KeyError: - search_by_failure[kw] = 1 - else: - try: - search_by_success[kw] += 1 - except KeyError: - search_by_success[kw] = 1 - elif mg["response"] == "404": - try: - search_by_failure[kw] += 1 - except KeyError: - search_by_failure[kw] = 1 - - # XXX should measure downtime via 503, other failure responses - - - agent = pkg_agent_pat.search(mg["agent"]) - if agent == None: - return - - ag = agent.groupdict() + try: + search_by_date[d.date().isoformat()] += 1 + except KeyError: + search_by_date[d.date().isoformat()] = 1 + try: + search_by_ip[mg["ip"]] += 1 + except KeyError: + search_by_ip[mg["ip"]] = 1 + + pm = pkg_pat.search(mg["uri"]) + if pm != None: + pg = pm.groupdict() + + kw = unquote(pg["keywords"]) + + if mg["response"] == "200": + if mg["subcode"] == "-": + # A zero-length response is a failed search + # (4 Aug - ...). Consequence of the migration + # to CherryPy; will be unneeded once + # http://defect.opensolaris.org/bz/show_bug.cgi?id=3238 + # is fixed. 
+ try: + search_by_failure[kw] += 1 + except KeyError: + search_by_failure[kw] = 1 + else: + try: + search_by_success[kw] += 1 + except KeyError: + search_by_success[kw] = 1 + elif mg["response"] == "404": + try: + search_by_failure[kw] += 1 + except KeyError: + search_by_failure[kw] = 1 + + # XXX should measure downtime via 503, other failure responses + + agent = pkg_agent_pat.search(mg["agent"]) + if agent == None: + return + + ag = agent.groupdict() + + try: + search_by_arch[ag["arch"]] += 1 + except KeyError: + search_by_arch[ag["arch"]] = 1 - try: - search_by_arch[ag["arch"]] += 1 - except KeyError: - search_by_arch[ag["arch"]] = 1 opts, args = getopt.getopt(sys.argv[1:], "a:b:sw:") for opt, arg in opts: - if opt == "-a": - try: - after = datetime.datetime(*(time.strptime(arg, "%Y-%b-%d")[0:6])) - except ValueError: - after = datetime.datetime(*(time.strptime(arg, "%Y-%m-%d")[0:6])) + if opt == "-a": + try: + after = datetime.datetime(*(time.strptime(arg, "%Y-%b-%d")[0:6])) + except ValueError: + after = datetime.datetime(*(time.strptime(arg, "%Y-%m-%d")[0:6])) - if opt == "-b": - before = arg + if opt == "-b": + before = arg - if opt == "-s": - summary_file = prefix_summary_open("search") + if opt == "-s": + summary_file = prefix_summary_open("search") - if opt == "-w": - active_window = arg + if opt == "-w": + active_window = arg host_cache_set_file_name() host_cache_load() @@ -184,49 +203,49 @@ def count_search(mg, d): lastdatetime = None for l in fileinput.input(args): - m = comb_log_pat.search(l) - if not m: - continue + m = comb_log_pat.search(l) + if not m: + continue - mg = m.groupdict() + mg = m.groupdict() - d = None + d = None - if lastdatetime and mg["date"] == lastdate: - d = lastdatetime - else: - d = datetime.datetime(*(time.strptime(mg["date"], "%d/%b/%Y")[0:6])) - lastdate = mg["date"] - lastdatetime = d + if lastdatetime and mg["date"] == lastdate: + d = lastdatetime + else: + d = datetime.datetime(*(time.strptime(mg["date"], "%d/%b/%Y")[0:6])) + lastdate = mg["date"] + lastdatetime = d - if after and d < after: - continue + if after and d < after: + continue - count_search(mg, d) + count_search(mg, d) host_cache_save() search_by_country = ip_to_country(search_by_ip) -report_section_begin("Search", summary_file = summary_file) -report_cols_begin(summary_file = summary_file) -report_col_begin("l", summary_file = summary_file) -report_by_date(search_by_date, "search", summary_file = summary_file) -report_by_ip(search_by_ip, "search", summary_file = summary_file) -report_col_end("l", summary_file = summary_file) -report_col_begin("r", summary_file = summary_file) -report_by_country(search_by_country, "search", summary_file = summary_file) -report_col_end("r", summary_file = summary_file) -report_cols_end(summary_file = summary_file) - -report_cols_begin(summary_file = summary_file) -report_col_begin("l", summary_file = summary_file) +report_section_begin("Search", summary_file=summary_file) +report_cols_begin(summary_file=summary_file) +report_col_begin("l", summary_file=summary_file) +report_by_date(search_by_date, "search", summary_file=summary_file) +report_by_ip(search_by_ip, "search", summary_file=summary_file) +report_col_end("l", summary_file=summary_file) +report_col_begin("r", summary_file=summary_file) +report_by_country(search_by_country, "search", summary_file=summary_file) +report_col_end("r", summary_file=summary_file) +report_cols_end(summary_file=summary_file) + +report_cols_begin(summary_file=summary_file) +report_col_begin("l", 
summary_file=summary_file) report_search_by_failure() -report_col_end("l", summary_file = summary_file) -report_col_begin("r", summary_file = summary_file) +report_col_end("l", summary_file=summary_file) +report_col_begin("r", summary_file=summary_file) report_search_by_success() -report_col_end("r", summary_file = summary_file) -report_cols_end(summary_file = summary_file) -report_section_end(summary_file = summary_file) +report_col_end("r", summary_file=summary_file) +report_cols_end(summary_file=summary_file) +report_section_end(summary_file=summary_file) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/log-scripts/config.py b/src/util/log-scripts/config.py index 209d30bb6..37275ec6a 100644 --- a/src/util/log-scripts/config.py +++ b/src/util/log-scripts/config.py @@ -27,15 +27,17 @@ import os from six.moves import configparser -CFGFILE="site-config" +CFGFILE = "site-config" + def get(option, default=None): - cfg = configparser.ConfigParser() - cfg.read(CFGFILE) - value = cfg.get("default", option) - if not value: - return default - return value + cfg = configparser.ConfigParser() + cfg.read(CFGFILE) + value = cfg.get("default", option) + if not value: + return default + return value + # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/log-scripts/in_footer.py b/src/util/log-scripts/in_footer.py index 883e7917b..ae0281eef 100644 --- a/src/util/log-scripts/in_footer.py +++ b/src/util/log-scripts/in_footer.py @@ -34,4 +34,4 @@ print(footer) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/log-scripts/in_header.py b/src/util/log-scripts/in_header.py index eb6627aad..6ab4e2956 100644 --- a/src/util/log-scripts/in_header.py +++ b/src/util/log-scripts/in_header.py @@ -111,9 +111,11 @@

    {1} {2} Statistics

    -""".format(hostname, hostname, hostname) +""".format( + hostname, hostname, hostname +) print(header) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/log-scripts/log.py b/src/util/log-scripts/log.py index 2c10747fb..7eebff63c 100644 --- a/src/util/log-scripts/log.py +++ b/src/util/log-scripts/log.py @@ -26,7 +26,9 @@ from __future__ import division from __future__ import print_function -import pkg.site_paths; pkg.site_paths.init() +import pkg.site_paths + +pkg.site_paths.init() import os import getopt import re @@ -44,91 +46,96 @@ def process(l): - """Process one Apache common log line.""" - ex = r"([\d\.]*) - - \[([^\]]*)\] \"([A-Z]*) (.*) HTTP/1\..\" (\d\d\d) (\d*)" - m = re.match(ex, l) + """Process one Apache common log line.""" + ex = ( + r"([\d\.]*) - - \[([^\]]*)\] \"([A-Z]*) (.*) HTTP/1\..\" (\d\d\d) (\d*)" + ) + m = re.match(ex, l) + + totals["dl"] += int(m.group(6)) + hosts[m.group(1)] = 1 + + if m.group(5) == "200": + try: + codes_200[m.group(1)].append(int(m.group(6))) + except KeyError: + codes_200[m.group(1)] = [int(m.group(6))] + + elif m.group(5) == "206": + try: + codes_206[m.group(1)].append(int(m.group(6))) + except KeyError: + codes_206[m.group(1)] = [int(m.group(6))] + + else: + try: + codes_other[m.group(1)].append(m.group(5)) + except KeyError: + codes_other[m.group(1)] = [m.group(5)] - totals["dl"] += int(m.group(6)) - hosts[m.group(1)] = 1 - if m.group(5) == "200": - try: - codes_200[m.group(1)].append(int(m.group(6))) - except KeyError: - codes_200[m.group(1)] = [ int(m.group(6)) ] +def dlunits(codes, size): + n = 0 - elif m.group(5) == "206": - try: - codes_206[m.group(1)].append(int(m.group(6))) - except KeyError: - codes_206[m.group(1)] = [ int(m.group(6)) ] + for k in codes.keys(): + if sum(codes[k]) >= size: + n += 1 - else: - try: - codes_other[m.group(1)].append(m.group(5)) - except KeyError: - codes_other[m.group(1)] = [m.group(5)] + return n -def dlunits(codes, size): - n = 0 - for k in codes.keys(): - if sum(codes[k]) >= size: - n +=1 +def dls_linked(codes_200, codes_206, size): + linked = 0 + for k in codes_206.keys(): + if k in codes_200.keys(): + total = sum(codes_200[k]) + sum(codes_206[k]) + if total > size: + linked += total // size + + if total > 10 * size: + try: + host = (socket.gethostbyaddr(k)[0],) + print(host) + except: + pass - return n + return linked -def dls_linked(codes_200, codes_206, size): - linked = 0 - for k in codes_206.keys(): - if k in codes_200.keys(): - total = sum(codes_200[k]) + sum(codes_206[k]) - if total > size: - linked += total//size - - if total > 10 * size: - try: - host = socket.gethostbyaddr(k)[0], - print(host) - except: - pass - - return linked if __name__ == "__main__": - opts, pargs = getopt.getopt(sys.argv[1:], "f:s:") + opts, pargs = getopt.getopt(sys.argv[1:], "f:s:") - size = None - fname = None + size = None + fname = None - for opt, arg in opts: - if opt == "-f": - fname = arg - if opt == "-s": - size = int(arg) + for opt, arg in opts: + if opt == "-f": + fname = arg + if opt == "-s": + size = int(arg) - assert not fname == None + assert not fname == None - lg = open(fname) + lg = open(fname) - for l in lg.readlines(): - process(l) + for l in lg.readlines(): + process(l) - print("distinct hosts: {0:d}".format(len(hosts.keys()))) - print("200 requests: {0:d}".format(len(codes_200.keys()))) - print("206 requests: {0:d}".format(len(codes_206.keys()))) - print("other requests: {0:d}".format(len(codes_other.keys()))) + print("distinct hosts: 
{0:d}".format(len(hosts.keys()))) + print("200 requests: {0:d}".format(len(codes_200.keys()))) + print("206 requests: {0:d}".format(len(codes_206.keys()))) + print("other requests: {0:d}".format(len(codes_other.keys()))) - if not size: - sys.exit(0) + if not size: + sys.exit(0) - print("200 units: {0:d}".format(dlunits(codes_200, size))) - print("206 units: {0:d}".format(dlunits(codes_206, size))) + print("200 units: {0:d}".format(dlunits(codes_200, size))) + print("206 units: {0:d}".format(dlunits(codes_206, size))) - print("linked units: {0:d}".format(dls_linked(codes_200, codes_206, size))) + print("linked units: {0:d}".format(dls_linked(codes_200, codes_206, size))) - print("total units: {0:d}".format(totals["dl"] // size)) + print("total units: {0:d}".format(totals["dl"] // size)) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/log-scripts/translate.py b/src/util/log-scripts/translate.py index c9e6ecc0d..e7542fbc2 100644 --- a/src/util/log-scripts/translate.py +++ b/src/util/log-scripts/translate.py @@ -37,68 +37,68 @@ # 1.2.3.4 - - [12/Aug/2008:19:21:28 -0700] "GET /manifest/0/SUNWkos@0.5.11%2C5.11-0.94%3A20080721T212150Z HTTP/1.1" 200 19748 "-" "pkg/d974bb176266 (sunos i86pc; 5.11 snv_86; full)" if len(sys.argv) != 3: - print("Usage: {0} ".format(sys.argv[0])) - sys.exit(2) + print("Usage: {0} ".format(sys.argv[0])) + sys.exit(2) infile = open(sys.argv[1], "r") outfile = open(sys.argv[2], "w") gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE) -ops = ["1p.png", "filelist", "catalog", "manifest", "search", "file"] +ops = ["1p.png", "filelist", "catalog", "manifest", "search", "file"] cnt = {} for x in ops: - cnt[x] = 0 + cnt[x] = 0 while True: - line = infile.readline() - if len(line) == 0: # EOF - break - - #print("line: [{0}]".format(line)) - - fields = line.split() - (ip, d, fullop) = (fields[0], fields[3], fields[6]) - del fields - - # Get country code and translate ip -> md5 of ip - cc = gi.country_code_by_addr(ip) - ip = md5.md5(ip) - ip = ip.hexdigest() - - # Goofy date -> UTS - d = time.mktime(time.strptime(d[1:], "%d/%b/%Y:%H:%M:%S")) - d = str(d).split(".")[0] - - # Figure out op and opargs - opflds = fullop.split("/") - op = opflds[1] - if "1p.png" in op: - op = op[0:op.find("?")] - if op not in ops: - continue - # only interested in catalog/0 operations - if op == "catalog" and opflds[2] != "0": - continue - opargs = "" - if op == "search": - opargs = "/".join(opflds[3:]) - if op == "file": - opargs = opflds[3] - - # TODO: also need to grab size - - cnt[op] += 1 - - print("{0} {1} {2} {3} {4}".format(ip, cc, d, op, opargs), file=outfile) + line = infile.readline() + if len(line) == 0: # EOF + break + + # print("line: [{0}]".format(line)) + + fields = line.split() + (ip, d, fullop) = (fields[0], fields[3], fields[6]) + del fields + + # Get country code and translate ip -> md5 of ip + cc = gi.country_code_by_addr(ip) + ip = md5.md5(ip) + ip = ip.hexdigest() + + # Goofy date -> UTS + d = time.mktime(time.strptime(d[1:], "%d/%b/%Y:%H:%M:%S")) + d = str(d).split(".")[0] + + # Figure out op and opargs + opflds = fullop.split("/") + op = opflds[1] + if "1p.png" in op: + op = op[0 : op.find("?")] + if op not in ops: + continue + # only interested in catalog/0 operations + if op == "catalog" and opflds[2] != "0": + continue + opargs = "" + if op == "search": + opargs = "/".join(opflds[3:]) + if op == "file": + opargs = opflds[3] + + # TODO: also need to grab size + + cnt[op] += 1 + + print("{0} {1} {2} {3} {4}".format(ip, cc, d, op, opargs), 
file=outfile) infile.close() outfile.close() for x in ops: - print("# {0}: {1:d}".format(x, cnt[x])) + print("# {0}: {1:d}".format(x, cnt[x])) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/publish/pkgdiff.py b/src/util/publish/pkgdiff.py index e9f8a1f41..8d0130914 100644 --- a/src/util/publish/pkgdiff.py +++ b/src/util/publish/pkgdiff.py @@ -25,7 +25,9 @@ # from __future__ import print_function -import pkg.site_paths; pkg.site_paths.init() +import pkg.site_paths + +pkg.site_paths.init() import getopt import gettext import locale @@ -45,354 +47,392 @@ from itertools import product from pkg.client.pkgdefs import EXIT_OK, EXIT_OOPS, EXIT_BADOPT, EXIT_PARTIAL + def usage(errmsg="", exitcode=EXIT_BADOPT): - """Emit a usage message and optionally prefix it with a more specific - error message. Causes program to exit.""" + """Emit a usage message and optionally prefix it with a more specific + error message. Causes program to exit.""" - if errmsg: - print("pkgdiff: {0}".format(errmsg), file=sys.stderr) + if errmsg: + print("pkgdiff: {0}".format(errmsg), file=sys.stderr) - print(_("""\ + print( + _( + """\ Usage: pkgdiff [-i attribute]... [-o attribute] [-t action_name[,action_name]...]... - [-v name=value]... (file1 | -) (file2 | -)""")) - sys.exit(exitcode) + [-v name=value]... (file1 | -) (file2 | -)""" + ) + ) + sys.exit(exitcode) + def error(text, exitcode=EXIT_PARTIAL): - """Emit an error message prefixed by the command name """ + """Emit an error message prefixed by the command name""" - print("pkgdiff: {0}".format(text), file=sys.stderr) + print("pkgdiff: {0}".format(text), file=sys.stderr) - if exitcode != None: - sys.exit(exitcode) + if exitcode != None: + sys.exit(exitcode) -def main_func(): - ignoreattrs = [] - onlyattrs = [] - onlytypes = [] - varattrs = defaultdict(set) - cmp_policy = CMP_ALL - - try: - opts, pargs = getopt.getopt(sys.argv[1:], "i:o:t:uv:?", ["help"]) - for opt, arg in opts: - if opt == "-i": - ignoreattrs.append('timestamp') - ignoreattrs.append(arg) - if arg == 'hash': - ignoreattrs.extend([ - 'chash', 'pkg.content-hash', - 'elfhash', 'timestamp']) - if arg == 'size': - ignoreattrs.extend([ - 'pkg.csize', 'pkg.size']) - elif opt == "-o": - onlyattrs.append(arg) - elif opt == "-t": - onlytypes.extend(arg.split(",")) - elif opt == "-u": - cmp_policy = CMP_UNSIGNED - elif opt == "-v": - args = arg.split("=") - if len(args) != 2: - usage(_("variant option incorrect {0}").format( - arg)) - if not args[0].startswith("variant."): - args[0] = "variant." 
+ args[0] - varattrs[args[0]].add(args[1]) - elif opt in ("--help", "-?"): - usage(exitcode=EXIT_OK) - - except getopt.GetoptError as e: - usage(_("illegal global option -- {0}").format(e.opt)) - - if len(pargs) != 2: - usage(_("two manifest arguments are required")) - - if (pargs[0] == "-" and pargs[1] == "-"): - usage(_("only one manifest argument can be stdin")) - - if ignoreattrs and onlyattrs: - usage(_("-i and -o options may not be used at the same time.")) - - for v in varattrs: - if len(varattrs[v]) > 1: - usage(_("For any variant, only one value may be " - "specified.")) - varattrs[v] = varattrs[v].pop() - - ignoreattrs = set(ignoreattrs) - onlyattrs = set(onlyattrs) - onlytypes = set(onlytypes) - - utypes = set( - t - for t in onlytypes - if t == "generic" or t not in pkg.actions.types +def main_func(): + ignoreattrs = [] + onlyattrs = [] + onlytypes = [] + varattrs = defaultdict(set) + cmp_policy = CMP_ALL + + try: + opts, pargs = getopt.getopt(sys.argv[1:], "i:o:t:uv:?", ["help"]) + for opt, arg in opts: + if opt == "-i": + ignoreattrs.append("timestamp") + ignoreattrs.append(arg) + if arg == "hash": + ignoreattrs.extend( + ["chash", "pkg.content-hash", "elfhash", "timestamp"] + ) + if arg == "size": + ignoreattrs.extend(["pkg.csize", "pkg.size"]) + elif opt == "-o": + onlyattrs.append(arg) + elif opt == "-t": + onlytypes.extend(arg.split(",")) + elif opt == "-u": + cmp_policy = CMP_UNSIGNED + elif opt == "-v": + args = arg.split("=") + if len(args) != 2: + usage(_("variant option incorrect {0}").format(arg)) + if not args[0].startswith("variant."): + args[0] = "variant." + args[0] + varattrs[args[0]].add(args[1]) + elif opt in ("--help", "-?"): + usage(exitcode=EXIT_OK) + + except getopt.GetoptError as e: + usage(_("illegal global option -- {0}").format(e.opt)) + + if len(pargs) != 2: + usage(_("two manifest arguments are required")) + + if pargs[0] == "-" and pargs[1] == "-": + usage(_("only one manifest argument can be stdin")) + + if ignoreattrs and onlyattrs: + usage(_("-i and -o options may not be used at the same time.")) + + for v in varattrs: + if len(varattrs[v]) > 1: + usage(_("For any variant, only one value may be " "specified.")) + varattrs[v] = varattrs[v].pop() + + ignoreattrs = set(ignoreattrs) + onlyattrs = set(onlyattrs) + onlytypes = set(onlytypes) + + utypes = set( + t for t in onlytypes if t == "generic" or t not in pkg.actions.types + ) + + if utypes: + usage( + _( + "unknown action types: {0}".format( + apx.list_to_lang(list(utypes)) + ) + ) ) - if utypes: - usage(_("unknown action types: {0}".format( - apx.list_to_lang(list(utypes))))) - - manifest1 = manifest.Manifest() - manifest2 = manifest.Manifest() - try: - # This assumes that both pargs are not '-'. - for p, m in zip(pargs, (manifest1, manifest2)): - if p == "-": - m.set_content(content=sys.stdin.read()) - else: - m.set_content(pathname=p) - except (pkg.actions.ActionError, apx.InvalidPackageErrors) as e: - error(_("Action error in file {p}: {e}").format(**locals())) - except (EnvironmentError, apx.ApiException) as e: - error(e) - - # - # manifest filtering - # - - # filter action type - if onlytypes: - for m in (manifest1, manifest2): - # Must pass complete list of actions to set_content, not - # a generator, to avoid clobbering manifest contents. - m.set_content(content=list(m.gen_actions_by_types( - onlytypes))) - - # filter variant + manifest1 = manifest.Manifest() + manifest2 = manifest.Manifest() + try: + # This assumes that both pargs are not '-'. 
+ for p, m in zip(pargs, (manifest1, manifest2)): + if p == "-": + m.set_content(content=sys.stdin.read()) + else: + m.set_content(pathname=p) + except (pkg.actions.ActionError, apx.InvalidPackageErrors) as e: + error(_("Action error in file {p}: {e}").format(**locals())) + except (EnvironmentError, apx.ApiException) as e: + error(e) + + # + # manifest filtering + # + + # filter action type + if onlytypes: + for m in (manifest1, manifest2): + # Must pass complete list of actions to set_content, not + # a generator, to avoid clobbering manifest contents. + m.set_content(content=list(m.gen_actions_by_types(onlytypes))) + + # filter variant + v1 = manifest1.get_all_variants() + v2 = manifest2.get_all_variants() + for vname in varattrs: + for _path, v, m in zip(pargs, (v1, v2), (manifest1, manifest2)): + if vname not in v: + continue + filt = varattrs[vname] + if filt not in v[vname]: + usage( + _( + "Manifest {path} doesn't support " + "variant {vname}={filt}".format(**locals()) + ) + ) + + # remove the variant tag + def rip(a): + a.attrs.pop(vname, None) + return a + + m.set_content( + [ + rip(a) + for a in m.gen_actions( + excludes=[variant.Variants({vname: filt}).allow_action] + ) + ] + ) + m[vname] = filt + + if varattrs: + # need to rebuild these if we're filtering variants v1 = manifest1.get_all_variants() v2 = manifest2.get_all_variants() - for vname in varattrs: - for _path, v, m in zip(pargs, (v1, v2), (manifest1, manifest2)): - if vname not in v: - continue - filt = varattrs[vname] - if filt not in v[vname]: - usage(_("Manifest {path} doesn't support " - "variant {vname}={filt}".format(**locals()))) - # remove the variant tag - def rip(a): - a.attrs.pop(vname, None) - return a - m.set_content([ - rip(a) - for a in m.gen_actions(excludes=[ - variant.Variants({vname: filt}).allow_action]) - ]) - m[vname] = filt - - if varattrs: - # need to rebuild these if we're filtering variants - v1 = manifest1.get_all_variants() - v2 = manifest2.get_all_variants() - - # we need to be a little clever about variants, since - # we can have multiple actions w/ the same key attributes - # in each manifest in that case. First, make sure any variants - # of the same name have the same values defined. - for k in set(v1.keys()) & set(v2.keys()): - if v1[k] != v2[k]: - error(_("Manifests support different variants " - "{v1} {v2}").format(v1=v1, v2=v2)) - - # Now, get a list of all possible variant values, including None - # across all variants and both manifests - v_values = dict() - - for v in v1: - v1[v].add(None) - for a in v1[v]: - v_values.setdefault(v, set()).add((v, a)) - - for v in v2: - v2[v].add(None) - for a in v2[v]: - v_values.setdefault(v, set()).add((v, a)) - - diffs = [] - - for tup in product(*v_values.values()): - # build excludes closure to examine only actions exactly - # matching current variant values... this is needed to - # avoid confusing manifest difference code w/ multiple - # actions w/ same key attribute values or getting dups - # in output - def allow(a, publisher=None): - for k, v in tup: - if v is not None: - if k not in a.attrs or a.attrs[k] != v: - return False - elif k in a.attrs: - return False - return True - - a, c, r = manifest2.difference(manifest1, [allow], [allow], - cmp_policy=cmp_policy) - diffs += a - diffs += c - diffs += r - - # License action still causes spurious diffs... check again for now. 
- real_diffs = [ - (a, b) - for a, b in diffs - if a is None or b is None or a.different(b, cmp_policy=cmp_policy) - ] - - if not real_diffs: - return 0 - - # define some ordering functions so that output is easily readable - # First, a human version of action comparison that works across - # variants and action changes... - def compare(a, b): - # pull the relevant action out of the old value, new - # value tuples - a = a[0] if a[0] else a[1] - b = b[0] if b[0] else b[1] - - if hasattr(a, "key_attr") and hasattr(b, "key_attr") and \ - a.key_attr == b.key_attr: - res = misc.cmp(a.attrs[a.key_attr], b.attrs[b.key_attr]) - if res != NotImplemented: - return res - # sort by variant - res = misc.cmp(sorted(list(a.get_variant_template())), - sorted(list(b.get_variant_template()))) - if res != NotImplemented: - return res - else: - res = misc.cmp(a.ordinality, b.ordinality) - if res != NotImplemented: - return res - # Fall back to a simple string compare if we have - # differing types (indicated by the above NotImplemented - # return values. - return misc.cmp(str(a), str(b)) - - # sort and.... - diffs = sorted(diffs, key=cmp_to_key(compare)) - - # handle list attributes - def attrval(attrs, k, elide_iter=tuple()): - def q(s): - if " " in s or s == "": - return '"{0}"'.format(s) - else: - return s - - v = attrs[k] - if isinstance(v, list) or isinstance(v, set): - out = " ".join([ - "{0}={1}".format(k, q(lmt)) - for lmt in sorted(v) - if lmt not in elide_iter - ]) - elif " " in v or v == "": - out = k + "=\"" + v + "\"" - else: - out = k + "=" + v - return out - - # figure out when to print diffs - def conditional_print(s, a): - if onlyattrs: - if not set(a.attrs.keys()) & onlyattrs: - return False - elif ignoreattrs: - if not set(a.attrs.keys()) - ignoreattrs: - return False - - print("{0} {1}".format(s, a)) - return True - - different = False - - for old, new in diffs: - if not new: - different |= conditional_print("-", old) - elif not old: - different |= conditional_print("+", new) + + # we need to be a little clever about variants, since + # we can have multiple actions w/ the same key attributes + # in each manifest in that case. First, make sure any variants + # of the same name have the same values defined. + for k in set(v1.keys()) & set(v2.keys()): + if v1[k] != v2[k]: + error( + _("Manifests support different variants " "{v1} {v2}").format( + v1=v1, v2=v2 + ) + ) + + # Now, get a list of all possible variant values, including None + # across all variants and both manifests + v_values = dict() + + for v in v1: + v1[v].add(None) + for a in v1[v]: + v_values.setdefault(v, set()).add((v, a)) + + for v in v2: + v2[v].add(None) + for a in v2[v]: + v_values.setdefault(v, set()).add((v, a)) + + diffs = [] + + for tup in product(*v_values.values()): + # build excludes closure to examine only actions exactly + # matching current variant values... this is needed to + # avoid confusing manifest difference code w/ multiple + # actions w/ same key attribute values or getting dups + # in output + def allow(a, publisher=None): + for k, v in tup: + if v is not None: + if k not in a.attrs or a.attrs[k] != v: + return False + elif k in a.attrs: + return False + return True + + a, c, r = manifest2.difference( + manifest1, [allow], [allow], cmp_policy=cmp_policy + ) + diffs += a + diffs += c + diffs += r + + # License action still causes spurious diffs... check again for now. 
+ real_diffs = [ + (a, b) + for a, b in diffs + if a is None or b is None or a.different(b, cmp_policy=cmp_policy) + ] + + if not real_diffs: + return 0 + + # define some ordering functions so that output is easily readable + # First, a human version of action comparison that works across + # variants and action changes... + def compare(a, b): + # pull the relevant action out of the old value, new + # value tuples + a = a[0] if a[0] else a[1] + b = b[0] if b[0] else b[1] + + if ( + hasattr(a, "key_attr") + and hasattr(b, "key_attr") + and a.key_attr == b.key_attr + ): + res = misc.cmp(a.attrs[a.key_attr], b.attrs[b.key_attr]) + if res != NotImplemented: + return res + # sort by variant + res = misc.cmp( + sorted(list(a.get_variant_template())), + sorted(list(b.get_variant_template())), + ) + if res != NotImplemented: + return res + else: + res = misc.cmp(a.ordinality, b.ordinality) + if res != NotImplemented: + return res + # Fall back to a simple string compare if we have + # differing types (indicated by the above NotImplemented + # return values. + return misc.cmp(str(a), str(b)) + + # sort and.... + diffs = sorted(diffs, key=cmp_to_key(compare)) + + # handle list attributes + def attrval(attrs, k, elide_iter=tuple()): + def q(s): + if " " in s or s == "": + return '"{0}"'.format(s) + else: + return s + + v = attrs[k] + if isinstance(v, list) or isinstance(v, set): + out = " ".join( + [ + "{0}={1}".format(k, q(lmt)) + for lmt in sorted(v) + if lmt not in elide_iter + ] + ) + elif " " in v or v == "": + out = k + '="' + v + '"' + else: + out = k + "=" + v + return out + + # figure out when to print diffs + def conditional_print(s, a): + if onlyattrs: + if not set(a.attrs.keys()) & onlyattrs: + return False + elif ignoreattrs: + if not set(a.attrs.keys()) - ignoreattrs: + return False + + print("{0} {1}".format(s, a)) + return True + + different = False + + for old, new in diffs: + if not new: + different |= conditional_print("-", old) + elif not old: + different |= conditional_print("+", new) + else: + s = [] + + if not onlyattrs: + if hasattr(old, "hash") and "hash" not in ignoreattrs: + if old.hash != new.hash: + s.append(" - {0}".format(old.hash)) + s.append(" + {0}".format(new.hash)) + attrdiffs = set(new.differences(old)) - ignoreattrs + attrsames = sorted( + list( + set(list(old.attrs.keys()) + list(new.attrs.keys())) + - set(new.differences(old)) + ) + ) + else: + if hasattr(old, "hash") and "hash" in onlyattrs: + if old.hash != new.hash: + s.append(" - {0}".format(old.hash)) + s.append(" + {0}".format(new.hash)) + attrdiffs = set(new.differences(old)) & onlyattrs + attrsames = sorted( + list( + set(list(old.attrs.keys()) + list(new.attrs.keys())) + - set(new.differences(old)) + ) + ) + + for a in sorted(attrdiffs): + if ( + a in old.attrs + and a in new.attrs + and isinstance(old.attrs[a], list) + and isinstance(new.attrs[a], list) + ): + elide_set = set(old.attrs[a]) & set(new.attrs[a]) else: - s = [] - - if not onlyattrs: - if (hasattr(old, "hash") and - "hash" not in ignoreattrs): - if old.hash != new.hash: - s.append(" - {0}".format(old.hash)) - s.append(" + {0}".format(new.hash)) - attrdiffs = (set(new.differences(old)) - - ignoreattrs) - attrsames = sorted( list(set(list(old.attrs.keys()) + - list(new.attrs.keys())) - - set(new.differences(old)))) - else: - if hasattr(old, "hash") and "hash" in onlyattrs: - if old.hash != new.hash: - s.append(" - {0}".format(old.hash)) - s.append(" + {0}".format(new.hash)) - attrdiffs = (set(new.differences(old)) & - onlyattrs) - attrsames 
= sorted(list(set(list(old.attrs.keys()) + - list(new.attrs.keys())) - - set(new.differences(old)))) - - for a in sorted(attrdiffs): - if a in old.attrs and a in new.attrs and \ - isinstance(old.attrs[a], list) and \ - isinstance(new.attrs[a], list): - elide_set = (set(old.attrs[a]) & - set(new.attrs[a])) - else: - elide_set = set() - if a in old.attrs: - diff_str = attrval(old.attrs, a, - elide_iter=elide_set) - if diff_str: - s.append(" - {0}".format(diff_str)) - if a in new.attrs: - diff_str = attrval(new.attrs, a, - elide_iter=elide_set) - if diff_str: - s.append(" + {0}".format(diff_str)) - # print out part of action that is the same - if s: - different = True - print("{0} {1} {2}".format(old.name, - attrval(old.attrs, old.key_attr), - " ".join(("{0}".format(attrval(old.attrs,v)) - for v in attrsames if v != old.key_attr)))) - - for l in s: - print(l) - - return int(different) + elide_set = set() + if a in old.attrs: + diff_str = attrval(old.attrs, a, elide_iter=elide_set) + if diff_str: + s.append(" - {0}".format(diff_str)) + if a in new.attrs: + diff_str = attrval(new.attrs, a, elide_iter=elide_set) + if diff_str: + s.append(" + {0}".format(diff_str)) + # print out part of action that is the same + if s: + different = True + print( + "{0} {1} {2}".format( + old.name, + attrval(old.attrs, old.key_attr), + " ".join( + ( + "{0}".format(attrval(old.attrs, v)) + for v in attrsames + if v != old.key_attr + ) + ), + ) + ) + + for l in s: + print(l) + + return int(different) + if __name__ == "__main__": - misc.setlocale(locale.LC_ALL, "", error) - gettext.install("pkg", "/usr/share/locale") - misc.set_fd_limits(printer=error) - - if six.PY3: - # disable ResourceWarning: unclosed file - warnings.filterwarnings("ignore", category=ResourceWarning) - try: - exit_code = main_func() - except (PipeError, KeyboardInterrupt): - exit_code = EXIT_OOPS - except SystemExit as __e: - exit_code = __e - except Exception as __e: - traceback.print_exc() - error(misc.get_traceback_message(), exitcode=None) - exit_code = 99 - - sys.exit(exit_code) + misc.setlocale(locale.LC_ALL, "", error) + gettext.install("pkg", "/usr/share/locale") + misc.set_fd_limits(printer=error) + + if six.PY3: + # disable ResourceWarning: unclosed file + warnings.filterwarnings("ignore", category=ResourceWarning) + try: + exit_code = main_func() + except (PipeError, KeyboardInterrupt): + exit_code = EXIT_OOPS + except SystemExit as __e: + exit_code = __e + except Exception as __e: + traceback.print_exc() + error(misc.get_traceback_message(), exitcode=None) + exit_code = 99 + + sys.exit(exit_code) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/publish/pkgfmt.py b/src/util/publish/pkgfmt.py index 1ec22be40..bf8bdac2e 100755 --- a/src/util/publish/pkgfmt.py +++ b/src/util/publish/pkgfmt.py @@ -25,7 +25,9 @@ # from __future__ import print_function -import pkg.site_paths; pkg.site_paths.init() +import pkg.site_paths + +pkg.site_paths.init() # Prefixes should be ordered alphabetically with most specific first. 
DRIVER_ALIAS_PREFIXES = ( @@ -56,35 +58,35 @@ # 7) key attribute tags come first try: - import copy - import errno - import getopt - import gettext - import locale - import operator - import os - import re - import six - import sys - import tempfile - import traceback - import warnings - from difflib import unified_diff - from functools import cmp_to_key - from six.moves import cStringIO - - import pkg - import pkg.actions - import pkg.misc as misc - import pkg.portable - from pkg.misc import emsg, PipeError - from pkg.actions.generic import quote_attr_value - from pkg.actions.depend import known_types as dep_types - from pkg.client.pkgdefs import (EXIT_OK, EXIT_OOPS, EXIT_BADOPT, - EXIT_PARTIAL) + import copy + import errno + import getopt + import gettext + import locale + import operator + import os + import re + import six + import sys + import tempfile + import traceback + import warnings + from difflib import unified_diff + from functools import cmp_to_key + from six.moves import cStringIO + + import pkg + import pkg.actions + import pkg.misc as misc + import pkg.portable + from pkg.misc import emsg, PipeError + from pkg.actions.generic import quote_attr_value + from pkg.actions.depend import known_types as dep_types + from pkg.client.pkgdefs import EXIT_OK, EXIT_OOPS, EXIT_BADOPT, EXIT_PARTIAL except KeyboardInterrupt: - import sys - sys.exit(EXIT_OOPS) + import sys + + sys.exit(EXIT_OOPS) FMT_V1 = "v1" FMT_V2 = "v2" @@ -96,679 +98,715 @@ opt_format = FMT_V2 orig_opt_format = None + def usage(errmsg="", exitcode=EXIT_BADOPT): - """Emit a usage message and optionally prefix it with a more specific - error message. Causes program to exit.""" + """Emit a usage message and optionally prefix it with a more specific + error message. Causes program to exit.""" - if errmsg: - error(errmsg) + if errmsg: + error(errmsg) - print(_("""\ + print( + _( + """\ Usage: - pkgfmt [-cdsu] [-f v1|v2] [file1] ... """), file=sys.stderr) - - sys.exit(exitcode) - -def error(text, exitcode=EXIT_OOPS): - """Emit an error message prefixed by the command name """ + pkgfmt [-cdsu] [-f v1|v2] [file1] ... """ + ), + file=sys.stderr, + ) - # If we get passed something like an Exception, we can convert - # it down to a string. - text = str(text) + sys.exit(exitcode) - # If the message starts with whitespace, assume that it should come - # *before* the command-name prefix. - text_nows = text.lstrip() - ws = text[:len(text) - len(text_nows)] - # This has to be a constant value as we can't reliably get our actual - # program name on all platforms. - emsg(ws + "pkgfmt: error: " + text_nows) +def error(text, exitcode=EXIT_OOPS): + """Emit an error message prefixed by the command name""" - if exitcode != None: - sys.exit(exitcode) + # If we get passed something like an Exception, we can convert + # it down to a string. + text = str(text) -def read_line(f): - """Generates the lines in the file as tuples containing - (action, prepended macro, list of prepended comment lines); - handles continuation lines, transforms, etc.""" + # If the message starts with whitespace, assume that it should come + # *before* the command-name prefix. + text_nows = text.lstrip() + ws = text[: len(text) - len(text_nows)] - accumulate = "" - wrap_accumulate = "" - noncomment_line_seen = False - comments = [] + # This has to be a constant value as we can't reliably get our actual + # program name on all platforms. 
+ emsg(ws + "pkgfmt: error: " + text_nows) - for l in f: - line = l.strip() - wrap_line = l - # Preserve line continuations for transforms for V2, - # but force standard leading space formatting. - if line.endswith("\\"): - accumulate += line[:-1] - wrap_accumulate += re.sub(r"^\s+", " ", - wrap_line.rstrip(" \t")) - continue - elif accumulate: - line = accumulate + line - wrap_line = wrap_accumulate + re.sub(r"^\s+", " ", - wrap_line) - accumulate = "" - wrap_accumulate = "" - - if not line or line[0] == "#": - comments.append(line) - continue + if exitcode != None: + sys.exit(exitcode) - if not noncomment_line_seen: - noncomment_line_seen = True - yield None, "", comments - comments = [] - if line.startswith("$("): - cp = line.index(")") - macro = line[:cp + 1] - actstr = line[cp + 1:] - else: - macro = "" - actstr = line - - if actstr[0] == "<" and actstr[-1] == ">": - if opt_format == FMT_V2: - yield None, wrap_line.rstrip(), comments - else: - yield None, macro + actstr, comments +def read_line(f): + """Generates the lines in the file as tuples containing + (action, prepended macro, list of prepended comment lines); + handles continuation lines, transforms, etc.""" + + accumulate = "" + wrap_accumulate = "" + noncomment_line_seen = False + comments = [] + + for l in f: + line = l.strip() + wrap_line = l + # Preserve line continuations for transforms for V2, + # but force standard leading space formatting. + if line.endswith("\\"): + accumulate += line[:-1] + wrap_accumulate += re.sub(r"^\s+", " ", wrap_line.rstrip(" \t")) + continue + elif accumulate: + line = accumulate + line + wrap_line = wrap_accumulate + re.sub(r"^\s+", " ", wrap_line) + accumulate = "" + wrap_accumulate = "" + + if not line or line[0] == "#": + comments.append(line) + continue + + if not noncomment_line_seen: + noncomment_line_seen = True + yield None, "", comments + comments = [] + + if line.startswith("$("): + cp = line.index(")") + macro = line[: cp + 1] + actstr = line[cp + 1 :] + else: + macro = "" + actstr = line + + if actstr[0] == "<" and actstr[-1] == ">": + if opt_format == FMT_V2: + yield None, wrap_line.rstrip(), comments + else: + yield None, macro + actstr, comments + + comments = [] + macro = "" + continue - comments = [] - macro = "" - continue + try: + act = pkg.actions.fromstr(actstr) + + # For formatting purposes, treat dependency actions that + # do not yet have a valid type as invalid. + if act.name == "depend" and act.attrs.get("type") not in dep_types: + raise pkg.actions.InvalidActionError( + act, + _("Unknown type '{0}' in depend " "action").format( + act.attrs.get("type") + ), + ) + except ( + pkg.actions.MalformedActionError, + pkg.actions.UnknownActionError, + pkg.actions.InvalidActionError, + ): + # cannot convert; treat as special macro + yield None, macro + actstr, comments + else: + yield act, macro, comments - try: - act = pkg.actions.fromstr(actstr) - - # For formatting purposes, treat dependency actions that - # do not yet have a valid type as invalid. 
- if (act.name == "depend" and - act.attrs.get("type") not in dep_types): - raise pkg.actions.InvalidActionError(act, - _("Unknown type '{0}' in depend " - "action").format(act.attrs.get("type"))) - except (pkg.actions.MalformedActionError, - pkg.actions.UnknownActionError, - pkg.actions.InvalidActionError): - # cannot convert; treat as special macro - yield None, macro + actstr, comments - else: - yield act, macro, comments + comments = [] - comments = [] + if comments: + yield None, "", comments - if comments: - yield None, "", comments def cmplines(a, b): - """Compare two line tuples for sorting""" - # we know that all lines that reach here have actions - # make set actions first - # depend actions last - # rest in alpha order - - def typeord(a): - if a.name == "set": - return 1 - if opt_format == FMT_V2: - if a.name in ("driver", "group", "user"): - return 3 - if a.name in ("legacy", "license"): - return 4 - if a.name == "depend": - return 5 - return 2 - - c = misc.cmp(typeord(a[0]), typeord(b[0])) + """Compare two line tuples for sorting""" + # we know that all lines that reach here have actions + # make set actions first + # depend actions last + # rest in alpha order + + def typeord(a): + if a.name == "set": + return 1 + if opt_format == FMT_V2: + if a.name in ("driver", "group", "user"): + return 3 + if a.name in ("legacy", "license"): + return 4 + if a.name == "depend": + return 5 + return 2 + + c = misc.cmp(typeord(a[0]), typeord(b[0])) + if c: + return c + + if opt_format != FMT_V2: + c = misc.cmp(a[0].name, b[0].name) if c: - return c - - if opt_format != FMT_V2: - c = misc.cmp(a[0].name, b[0].name) - if c: - return c - - # Place set pkg.fmri actions first among set actions. - if a[0].name == "set" and a[0].attrs["name"] == "pkg.fmri": + return c + + # Place set pkg.fmri actions first among set actions. + if a[0].name == "set" and a[0].attrs["name"] == "pkg.fmri": + return -1 + if b[0].name == "set" and b[0].attrs["name"] == "pkg.fmri": + return 1 + + # Place set actions with names that start with pkg. before any + # remaining set actions. + if ( + a[0].name == "set" + and a[0].attrs["name"].startswith("pkg.") + and not (b[0].name != "set" or b[0].attrs["name"].startswith("pkg.")) + ): + return -1 + if ( + b[0].name == "set" + and b[0].attrs["name"].startswith("pkg.") + and not (a[0].name != "set" or a[0].attrs["name"].startswith("pkg.")) + ): + return 1 + + if opt_format == FMT_V2: + # Place set pkg.summary actions second and pkg.description + # options third. + for attr in ("pkg.summary", "pkg.description"): + if ( + a[0].name == "set" + and a[0].attrs["name"] == attr + and not b[0].attrs["name"] == attr + ): return -1 - if b[0].name == "set" and b[0].attrs["name"] == "pkg.fmri": - return 1 - - # Place set actions with names that start with pkg. before any - # remaining set actions. - if a[0].name == "set" and a[0].attrs["name"].startswith("pkg.") and \ - not (b[0].name != "set" or b[0].attrs["name"].startswith("pkg.")): - return -1 - if b[0].name == "set" and b[0].attrs["name"].startswith("pkg.") and \ - not (a[0].name != "set" or a[0].attrs["name"].startswith("pkg.")): + if ( + b[0].name == "set" + and b[0].attrs["name"] == attr + and not a[0].attrs["name"] == attr + ): return 1 + # Sort actions based on key attribute (if applicable). + key_attr = a[0].key_attr + if key_attr and key_attr == b[0].key_attr: + a_sk = b_sk = None if opt_format == FMT_V2: - # Place set pkg.summary actions second and pkg.description - # options third. 
- for attr in ("pkg.summary", "pkg.description"): - if (a[0].name == "set" and - a[0].attrs["name"] == attr and - not b[0].attrs["name"] == attr): - return -1 - if (b[0].name == "set" and - b[0].attrs["name"] == attr and - not a[0].attrs["name"] == attr): - return 1 - - # Sort actions based on key attribute (if applicable). - key_attr = a[0].key_attr - if key_attr and key_attr == b[0].key_attr: - a_sk = b_sk = None - if opt_format == FMT_V2: - if "path" in a[0].attrs and "path" in b[0].attrs: - # This ensures filesystem actions are sorted by - # path and link and hardlink actions are sorted - # by path and then target (when compared against - # each other). - if "target" in a[0].attrs and \ - "target" in b[0].attrs: - a_sk = operator.itemgetter("path", - "target")(a[0].attrs) - b_sk = operator.itemgetter("path", - "target")(b[0].attrs) - else: - a_sk = a[0].attrs["path"] - b_sk = b[0].attrs["path"] - elif a[0].name == "depend" and b[0].name == "depend": - a_sk = operator.itemgetter("type", "fmri")( - a[0].attrs) - b_sk = operator.itemgetter("type", "fmri")( - b[0].attrs) - - # If not using alternate format, or if no sort key has been - # determined, fallback to sorting on key attribute. - if not a_sk: - a_sk = a[0].attrs[key_attr] - if not b_sk: - b_sk = b[0].attrs[key_attr] - - c = misc.cmp(a_sk, b_sk) - # misc.cmp returns NotImplemented for uncomparable types in - # Python 3. Sort them based on stringified key attribute. - if c is NotImplemented: - c = misc.cmp(str(a_sk), str(b_sk)) - if c: - return c - elif c: - return c - - # No key attribute or key attribute sorting provides equal placement, so - # sort based on stringified action. - return misc.cmp(str(a[0]), str(b[0])) - -def write_line(line, fileobj): - """Write out a manifest line""" - # write out any comments w/o changes - global opt_unwrap - - comments = "\n".join(line[2]) - act = line[0] - out = line[1] + act.name - - sattrs = act.attrs - - ahash = None - try: - ahash = act.hash - if ahash and ahash != "NOHASH": - if "=" not in ahash and " " not in ahash and \ - '"' not in ahash: - out += " " + ahash - else: - sattrs = copy.copy(act.attrs) - sattrs["hash"] = ahash - ahash = None - except AttributeError: - # No hash to stash. - pass - - if opt_strip: - sattrs = dict((k, v) for k, v in six.iteritems(sattrs) - if not k.startswith('pkg.depend.') and - not k.startswith('pkg.debug')) - - # high order bits in sorting - def kvord(a): - # Variants should always be last attribute. - if a[0].startswith("variant."): - return 7 - # Facets should always be before variants. - if a[0].startswith("facet."): - return 6 - # List attributes should be before facets and variants. - if isinstance(a[1], list): - return 5 - - # note closure hack... - if opt_format == FMT_V2: - if act.name == "depend": - # For depend actions, type should always come - # first even though it's not the key attribute, - # and fmri should always come after type. - if a[0] == "fmri": - return 1 - elif a[0] == "type": - return 0 - elif act.name == "driver": - # For driver actions, attributes should be in - # this order: name, perms, clone_perms, privs, - # policy, devlink, alias. - if a[0] == "alias": - return 6 - elif a[0] == "devlink": - return 5 - elif a[0] == "policy": - return 4 - elif a[0] == "privs": - return 3 - elif a[0] == "clone_perms": - return 2 - elif a[0] == "perms": - return 1 - elif act.name != "user": - # Place target after path, owner before group, - # and all immediately after the action's key - # attribute. 
- if a[0] == "mode": - return 3 - elif a[0] == "group": - return 2 - elif a[0] == "owner" or a[0] == "target": - return 1 - - # Any other attributes should come just before list, facet, - # and variant attributes. - if a[0] != act.key_attr: - return 4 - - # No special order for all other cases. - return 0 - - # actual key function - def key_func(a): - return (kvord(a), a[0]) - - JOIN_TOK = " \\\n " - def grow(a, b, rem_values, force_nl=False): - if opt_unwrap or not force_nl: - lastnl = a.rfind("\n") - if lastnl == -1: - lastnl = 0 - - if opt_format == FMT_V2 and rem_values == 1: - # If outputting the last attribute value, then - # use full line length. - max_len = 80 - else: - # If V1 format, or there are more attributes to - # output, then account for line-continuation - # marker. - max_len = 78 - - # Note this length comparison doesn't include the space - # used to append the second part of the string. - if opt_unwrap or (len(a) - lastnl + len(b) < max_len): - return a + " " + b - return a + JOIN_TOK + b - - def get_alias_key(v): - """This function parses an alias attribute value into a list - of numeric values (e.g. hex -> int) and strings that can be - sensibly compared for sorting.""" - - alias = None - prefix = None - for pfx in DRIVER_ALIAS_PREFIXES: - if v.startswith(pfx): - # Strip known prefixes before attempting - # to create list of sort values. - alias = v.replace(pfx, "") - prefix = pfx - break - - if alias is None: - # alias didn't start with known prefix; use - # raw value for sorting. - return [v] - - entry = [prefix] - for part in alias.split(","): - for comp in part.split("."): - try: - cval = int(comp, 16) - except ValueError: - cval = comp - entry.append(cval) - return entry - - def cmp_aliases(a, b): - if opt_format == FMT_V1: - # Simple comparison for V1 format. - return misc.cmp(a, b) - # For V2 format, order aliases by interpreted value. - c = misc.cmp(get_alias_key(a), get_alias_key(b)) - # misc.cmp returns NotImplemented for uncomparable types in - # Python 3. Instead fallback to using the string of the key - # generated from the alias and sort alphabetically. This - # maintains the python 2 sorting behaviour. - if c is NotImplemented: - c = -misc.cmp(str(get_alias_key(a)), str(get_alias_key(b))) + if "path" in a[0].attrs and "path" in b[0].attrs: + # This ensures filesystem actions are sorted by + # path and link and hardlink actions are sorted + # by path and then target (when compared against + # each other). + if "target" in a[0].attrs and "target" in b[0].attrs: + a_sk = operator.itemgetter("path", "target")(a[0].attrs) + b_sk = operator.itemgetter("path", "target")(b[0].attrs) + else: + a_sk = a[0].attrs["path"] + b_sk = b[0].attrs["path"] + elif a[0].name == "depend" and b[0].name == "depend": + a_sk = operator.itemgetter("type", "fmri")(a[0].attrs) + b_sk = operator.itemgetter("type", "fmri")(b[0].attrs) + + # If not using alternate format, or if no sort key has been + # determined, fallback to sorting on key attribute. + if not a_sk: + a_sk = a[0].attrs[key_attr] + if not b_sk: + b_sk = b[0].attrs[key_attr] + + c = misc.cmp(a_sk, b_sk) + # misc.cmp returns NotImplemented for uncomparable types in + # Python 3. Sort them based on stringified key attribute. + if c is NotImplemented: + c = misc.cmp(str(a_sk), str(b_sk)) + if c: return c + elif c: + return c - def astr(aout): - # Number of attribute values for first line and remaining. - first_line = True - first_attr_count = 0 - rem_attr_count = 0 - - # Total number of remaining attribute values to output. 
- total_count = sum(len(act.attrlist(k)) for k in sattrs) - rem_count = total_count - - # Now build the action output string an attribute at a time. - for k, v in sorted(six.iteritems(sattrs), key=key_func): - # Newline breaks are only forced when there is more than - # one value for an attribute. - if not (isinstance(v, list) or isinstance(v, set)): - nv = [v] - use_force_nl = False - else: - nv = v - use_force_nl = True - - cmp_attrs = None - if k == "alias": - cmp_attrs = cmp_to_key(cmp_aliases) - for lmt in sorted(nv, key=cmp_attrs): - force_nl = use_force_nl and \ - (k == "alias" or (opt_format == FMT_V2 and - k.startswith("pkg.debug"))) - - aout = grow(aout, "=".join((k, - quote_attr_value(lmt))), rem_count, - force_nl=force_nl) - - # Must be done for each value. - if first_line and JOIN_TOK in aout: - first_line = False - first_attr_count = \ - (total_count - rem_count) - if ahash and ahash != "NOHASH": - first_attr_count += 1 - rem_attr_count = rem_count - - rem_count -= 1 - - return first_attr_count, rem_attr_count, aout - - first_attr_count, rem_attr_count, output = astr(out) - if opt_format == FMT_V2 and not opt_unwrap: - outlines = output.split(JOIN_TOK) - - # If wrapping only resulted in two lines, and the second line - # only has one attribute and the first line had zero attributes, - # unwrap the action. - if first_attr_count < 2 and rem_attr_count == 1 and \ - len(outlines) == 2 and first_attr_count == 0: - opt_unwrap = True - output = astr(out)[-1] - opt_unwrap = False - - if comments: - print(comments, file=fileobj) - - if (opt_strip and act.name == 'set' and - act.attrs['name'].startswith('pkg.debug')): - return + # No key attribute or key attribute sorting provides equal placement, so + # sort based on stringified action. + return misc.cmp(str(a[0]), str(b[0])) - if opt_format == FMT_V2: - # Force 'dir' actions to use four spaces at beginning of lines - # so they line up with other filesystem actions such as file, - # link, etc. - output = re.sub(r"^dir ", "dir ", output) - print(output, file=fileobj) -def main_func(): - global opt_unwrap - global opt_check - global opt_diffs - global opt_strip - global opt_format - global orig_opt_format +def write_line(line, fileobj): + """Write out a manifest line""" + # write out any comments w/o changes + global opt_unwrap + + comments = "\n".join(line[2]) + act = line[0] + out = line[1] + act.name + + sattrs = act.attrs + + ahash = None + try: + ahash = act.hash + if ahash and ahash != "NOHASH": + if "=" not in ahash and " " not in ahash and '"' not in ahash: + out += " " + ahash + else: + sattrs = copy.copy(act.attrs) + sattrs["hash"] = ahash + ahash = None + except AttributeError: + # No hash to stash. + pass + + if opt_strip: + sattrs = dict( + (k, v) + for k, v in six.iteritems(sattrs) + if not k.startswith("pkg.depend.") and not k.startswith("pkg.debug") + ) + + # high order bits in sorting + def kvord(a): + # Variants should always be last attribute. + if a[0].startswith("variant."): + return 7 + # Facets should always be before variants. + if a[0].startswith("facet."): + return 6 + # List attributes should be before facets and variants. + if isinstance(a[1], list): + return 5 + + # note closure hack... + if opt_format == FMT_V2: + if act.name == "depend": + # For depend actions, type should always come + # first even though it's not the key attribute, + # and fmri should always come after type. 
+ if a[0] == "fmri": + return 1 + elif a[0] == "type": + return 0 + elif act.name == "driver": + # For driver actions, attributes should be in + # this order: name, perms, clone_perms, privs, + # policy, devlink, alias. + if a[0] == "alias": + return 6 + elif a[0] == "devlink": + return 5 + elif a[0] == "policy": + return 4 + elif a[0] == "privs": + return 3 + elif a[0] == "clone_perms": + return 2 + elif a[0] == "perms": + return 1 + elif act.name != "user": + # Place target after path, owner before group, + # and all immediately after the action's key + # attribute. + if a[0] == "mode": + return 3 + elif a[0] == "group": + return 2 + elif a[0] == "owner" or a[0] == "target": + return 1 + + # Any other attributes should come just before list, facet, + # and variant attributes. + if a[0] != act.key_attr: + return 4 + + # No special order for all other cases. + return 0 + + # actual key function + def key_func(a): + return (kvord(a), a[0]) + + JOIN_TOK = " \\\n " + + def grow(a, b, rem_values, force_nl=False): + if opt_unwrap or not force_nl: + lastnl = a.rfind("\n") + if lastnl == -1: + lastnl = 0 + + if opt_format == FMT_V2 and rem_values == 1: + # If outputting the last attribute value, then + # use full line length. + max_len = 80 + else: + # If V1 format, or there are more attributes to + # output, then account for line-continuation + # marker. + max_len = 78 + + # Note this length comparison doesn't include the space + # used to append the second part of the string. + if opt_unwrap or (len(a) - lastnl + len(b) < max_len): + return a + " " + b + return a + JOIN_TOK + b + + def get_alias_key(v): + """This function parses an alias attribute value into a list + of numeric values (e.g. hex -> int) and strings that can be + sensibly compared for sorting.""" + + alias = None + prefix = None + for pfx in DRIVER_ALIAS_PREFIXES: + if v.startswith(pfx): + # Strip known prefixes before attempting + # to create list of sort values. + alias = v.replace(pfx, "") + prefix = pfx + break + + if alias is None: + # alias didn't start with known prefix; use + # raw value for sorting. + return [v] + + entry = [prefix] + for part in alias.split(","): + for comp in part.split("."): + try: + cval = int(comp, 16) + except ValueError: + cval = comp + entry.append(cval) + return entry + + def cmp_aliases(a, b): + if opt_format == FMT_V1: + # Simple comparison for V1 format. + return misc.cmp(a, b) + # For V2 format, order aliases by interpreted value. + c = misc.cmp(get_alias_key(a), get_alias_key(b)) + # misc.cmp returns NotImplemented for uncomparable types in + # Python 3. Instead fallback to using the string of the key + # generated from the alias and sort alphabetically. This + # maintains the python 2 sorting behaviour. + if c is NotImplemented: + c = -misc.cmp(str(get_alias_key(a)), str(get_alias_key(b))) + return c + + def astr(aout): + # Number of attribute values for first line and remaining. + first_line = True + first_attr_count = 0 + rem_attr_count = 0 + + # Total number of remaining attribute values to output. + total_count = sum(len(act.attrlist(k)) for k in sattrs) + rem_count = total_count + + # Now build the action output string an attribute at a time. + for k, v in sorted(six.iteritems(sattrs), key=key_func): + # Newline breaks are only forced when there is more than + # one value for an attribute. 
+ if not (isinstance(v, list) or isinstance(v, set)): + nv = [v] + use_force_nl = False + else: + nv = v + use_force_nl = True + + cmp_attrs = None + if k == "alias": + cmp_attrs = cmp_to_key(cmp_aliases) + for lmt in sorted(nv, key=cmp_attrs): + force_nl = use_force_nl and ( + k == "alias" + or (opt_format == FMT_V2 and k.startswith("pkg.debug")) + ) + + aout = grow( + aout, + "=".join((k, quote_attr_value(lmt))), + rem_count, + force_nl=force_nl, + ) + + # Must be done for each value. + if first_line and JOIN_TOK in aout: + first_line = False + first_attr_count = total_count - rem_count + if ahash and ahash != "NOHASH": + first_attr_count += 1 + rem_attr_count = rem_count + + rem_count -= 1 + + return first_attr_count, rem_attr_count, aout + + first_attr_count, rem_attr_count, output = astr(out) + if opt_format == FMT_V2 and not opt_unwrap: + outlines = output.split(JOIN_TOK) + + # If wrapping only resulted in two lines, and the second line + # only has one attribute and the first line had zero attributes, + # unwrap the action. + if ( + first_attr_count < 2 + and rem_attr_count == 1 + and len(outlines) == 2 + and first_attr_count == 0 + ): + opt_unwrap = True + output = astr(out)[-1] + opt_unwrap = False + + if comments: + print(comments, file=fileobj) + + if ( + opt_strip + and act.name == "set" + and act.attrs["name"].startswith("pkg.debug") + ): + return + + if opt_format == FMT_V2: + # Force 'dir' actions to use four spaces at beginning of lines + # so they line up with other filesystem actions such as file, + # link, etc. + output = re.sub(r"^dir ", "dir ", output) + print(output, file=fileobj) - env_format = os.environ.get("PKGFMT_OUTPUT") - if env_format: - opt_format = orig_opt_format = env_format - ret = 0 - opt_set = set() +def main_func(): + global opt_unwrap + global opt_check + global opt_diffs + global opt_strip + global opt_format + global orig_opt_format + + env_format = os.environ.get("PKGFMT_OUTPUT") + if env_format: + opt_format = orig_opt_format = env_format + + ret = 0 + opt_set = set() + + try: + opts, pargs = getopt.getopt(sys.argv[1:], "cdf:su?h", ["help"]) + for opt, arg in opts: + opt_set.add(opt) + if opt == "-c": + opt_check = True + elif opt == "-d": + opt_diffs = True + elif opt == "-f": + opt_format = orig_opt_format = arg + elif opt == "-s": + opt_strip = True + elif opt == "-u": + opt_unwrap = True + elif opt in ("-h", "--help", "-?"): + usage(exitcode=EXIT_OK) + except getopt.GetoptError as e: + if e.opt == "f": + usage(exitcode=EXIT_BADOPT) + else: + usage(_("illegal global option -- {0}").format(e.opt)) + if len(opt_set - set(["-f"])) > 1: + usage(_("only one of [cdu] may be specified")) + if opt_format not in (FMT_V1, FMT_V2): + usage(_("unsupported format '{0}'").format(opt_format)) + + def difference(in_file): + whole_f1 = in_file.readlines() + f2 = cStringIO() + fmt_file(cStringIO("".join(whole_f1)), f2) + f2.seek(0) + whole_f2 = f2.readlines() + + if whole_f1 == whole_f2: + if opt_diffs: + return 0, "" + return 0, "".join(whole_f2) + elif opt_diffs: + return 1, "".join(unified_diff(whole_f2, whole_f1)) + return 1, "".join(whole_f2) + + flist = pargs + if not flist: + try: + in_file = cStringIO() + in_file.write(sys.stdin.read()) + in_file.seek(0) + + ret, formatted = difference(in_file) + if ret == 1 and opt_check: + # Manifest was different; if user didn't specify + # a format explicitly, try V1 format. 
+ if not orig_opt_format: + opt_format = FMT_V1 + in_file.seek(0) + rcode, formatted = difference(in_file) + opt_format = FMT_V2 + if rcode == 0: + # Manifest is in V1 format. + return 0 + + error(_("manifest is not in pkgfmt form")) + elif ret == 1 and not opt_diffs: + # Treat as successful exit if not checking + # formatting or displaying diffs. + ret = 0 + + # Display formatted version (explicit 'end' needed to + # prevent output of extra newline) even if manifest + # didn't need formatting for the stdin case. (The + # assumption is that it might be used in a pipeline.) + if formatted: + print(formatted, end="") + except EnvironmentError as e: + if e.errno == errno.EPIPE: + # User closed input or output (i.e. killed piped + # program before all input was read or output + # was written). + return 1 + return ret + ret = 0 + tname = None + for fname in flist: try: - opts, pargs = getopt.getopt(sys.argv[1:], "cdf:su?h", ["help"]) - for opt, arg in opts: - opt_set.add(opt) - if opt == "-c": - opt_check = True - elif opt == "-d": - opt_diffs = True - elif opt == "-f": - opt_format = orig_opt_format = arg - elif opt == "-s": - opt_strip = True - elif opt == "-u": - opt_unwrap = True - elif opt in ("-h", "--help", "-?"): - usage(exitcode=EXIT_OK) - except getopt.GetoptError as e: - if e.opt == 'f': - usage(exitcode=EXIT_BADOPT) + # force path to be absolute; gives better diagnostics if + # something goes wrong. + path = os.path.abspath(fname) + + with open(fname, "r") as f: + rcode, formatted = difference(f) + if rcode == 0: + continue + + if opt_check: + # Manifest was different; if user didn't specify + # a format explicitly, try V1 format. + if not orig_opt_format: + opt_format = FMT_V1 + with open(fname, "r") as f: + rcode, formatted = difference(f) + opt_format = FMT_V2 + if rcode == 0: + # Manifest is in V1 format. + continue + + ret = 1 + if orig_opt_format: + error( + _( + "{0} is not in pkgfmt {1} form; " + "run `pkgfmt -f {1}` on the file " + "to reformat the manifest " + "in-place" + ).format(fname, opt_format), + exitcode=None, + ) else: - usage(_("illegal global option -- {0}").format(e.opt)) - if len(opt_set - set(["-f"])) > 1: - usage(_("only one of [cdu] may be specified")) - if opt_format not in (FMT_V1, FMT_V2): - usage(_("unsupported format '{0}'").format(opt_format)) - - def difference(in_file): - whole_f1 = in_file.readlines() - f2 = cStringIO() - fmt_file(cStringIO("".join(whole_f1)), f2) - f2.seek(0) - whole_f2 = f2.readlines() - - if whole_f1 == whole_f2: - if opt_diffs: - return 0, "" - return 0, "".join(whole_f2) - elif opt_diffs: - return 1, "".join(unified_diff(whole_f2, - whole_f1)) - return 1, "".join(whole_f2) - - flist = pargs - if not flist: + error( + _( + "{0} is not in pkgfmt form; " + "run pkgfmt on the file without " + "-c or -d to reformat the manifest " + "in-place" + ).format(fname), + exitcode=None, + ) + continue + elif opt_diffs: + # Display differences (explicit 'end' needed to + # prevent output of extra newline). + ret = 1 + print(formatted, end=" ") + continue + elif ret != 1: + # Treat as successful exit if not checking + # formatting or displaying diffs. + ret = 0 + + # Replace manifest with formatted version. + pathdir = os.path.dirname(path) + tfd, tname = tempfile.mkstemp(dir=pathdir) + with os.fdopen(tfd, "w") as t: + t.write(formatted) + + try: + # Ensure existing mode is preserved. 
+ mode = os.stat(fname).st_mode + os.chmod(tname, mode) + os.rename(tname, fname) + except EnvironmentError as e: + error(str(e), exitcode=EXIT_OOPS) + except (EnvironmentError, IOError) as e: + error(str(e), exitcode=EXIT_OOPS) + finally: + if tname: try: - in_file = cStringIO() - in_file.write(sys.stdin.read()) - in_file.seek(0) - - ret, formatted = difference(in_file) - if ret == 1 and opt_check: - # Manifest was different; if user didn't specify - # a format explicitly, try V1 format. - if not orig_opt_format: - opt_format = FMT_V1 - in_file.seek(0) - rcode, formatted = difference(in_file) - opt_format = FMT_V2 - if rcode == 0: - # Manifest is in V1 format. - return 0 - - error(_("manifest is not in pkgfmt form")) - elif ret == 1 and not opt_diffs: - # Treat as successful exit if not checking - # formatting or displaying diffs. - ret = 0 - - # Display formatted version (explicit 'end' needed to - # prevent output of extra newline) even if manifest - # didn't need formatting for the stdin case. (The - # assumption is that it might be used in a pipeline.) - if formatted: - print(formatted, end="") + pkg.portable.remove(tname) except EnvironmentError as e: - if e.errno == errno.EPIPE: - # User closed input or output (i.e. killed piped - # program before all input was read or output - # was written). - return 1 - return ret - - ret = 0 - tname = None - for fname in flist: - try: - # force path to be absolute; gives better diagnostics if - # something goes wrong. - path = os.path.abspath(fname) - - with open(fname, "r") as f: - rcode, formatted = difference(f) - if rcode == 0: - continue - - if opt_check: - # Manifest was different; if user didn't specify - # a format explicitly, try V1 format. - if not orig_opt_format: - opt_format = FMT_V1 - with open(fname, "r") as f: - rcode, formatted = difference(f) - opt_format = FMT_V2 - if rcode == 0: - # Manifest is in V1 format. - continue - - ret = 1 - if orig_opt_format: - error(_( - "{0} is not in pkgfmt {1} form; " - "run `pkgfmt -f {1}` on the file " - "to reformat the manifest " - "in-place") - .format(fname, opt_format), - exitcode=None) - else: - error(_("{0} is not in pkgfmt form; " - "run pkgfmt on the file without " - "-c or -d to reformat the manifest " - "in-place") .format(fname), - exitcode=None) - continue - elif opt_diffs: - # Display differences (explicit 'end' needed to - # prevent output of extra newline). - ret = 1 - print(formatted, end=" ") - continue - elif ret != 1: - # Treat as successful exit if not checking - # formatting or displaying diffs. - ret = 0 - - # Replace manifest with formatted version. - pathdir = os.path.dirname(path) - tfd, tname = tempfile.mkstemp(dir=pathdir) - with os.fdopen(tfd, "w") as t: - t.write(formatted) - - try: - # Ensure existing mode is preserved. - mode = os.stat(fname).st_mode - os.chmod(tname, mode) - os.rename(tname, fname) - except EnvironmentError as e: - error(str(e), exitcode=EXIT_OOPS) - except (EnvironmentError, IOError) as e: - error(str(e), exitcode=EXIT_OOPS) - finally: - if tname: - try: - pkg.portable.remove(tname) - except EnvironmentError as e: - if e.errno != errno.ENOENT: - raise + if e.errno != errno.ENOENT: + raise - return ret + return ret -def fmt_file(in_file, out_file): - lines = [] - saw_action = False - trailing_comments = [] - - for tp in read_line(in_file): - if tp[0] is None: - if saw_action and not tp[1]: - # Comments without a macro or transform - # nearby will be placed at the end if - # found after actions. 
- trailing_comments.extend(tp[2]) - continue - - # Any other comments, transforms, or unparseables - # will simply be printed back out wherever they - # were found before or after actions. - for l in tp[2]: - print(l, file=out_file) - if tp[1]: - print(tp[1], file=out_file) - else: - lines.append(tp) - saw_action = True - lines.sort(key=cmp_to_key(cmplines)) - for l in lines: - write_line(l, out_file) - out_file.writelines("\n".join(trailing_comments)) - if trailing_comments: - # Ensure file ends with newline. - out_file.write("\n") +def fmt_file(in_file, out_file): + lines = [] + saw_action = False + trailing_comments = [] + + for tp in read_line(in_file): + if tp[0] is None: + if saw_action and not tp[1]: + # Comments without a macro or transform + # nearby will be placed at the end if + # found after actions. + trailing_comments.extend(tp[2]) + continue + + # Any other comments, transforms, or unparseables + # will simply be printed back out wherever they + # were found before or after actions. + for l in tp[2]: + print(l, file=out_file) + if tp[1]: + print(tp[1], file=out_file) + else: + lines.append(tp) + saw_action = True + + lines.sort(key=cmp_to_key(cmplines)) + for l in lines: + write_line(l, out_file) + out_file.writelines("\n".join(trailing_comments)) + if trailing_comments: + # Ensure file ends with newline. + out_file.write("\n") if __name__ == "__main__": - misc.setlocale(locale.LC_ALL, "", error) - gettext.install("pkg", "/usr/share/locale") - misc.set_fd_limits(printer=error) - - if six.PY3: - # disable ResourceWarning: unclosed file - warnings.filterwarnings("ignore", category=ResourceWarning) - try: - __ret = main_func() - except (PipeError, KeyboardInterrupt): - # We don't want to display any messages here to prevent - # possible further broken pipe (EPIPE) errors. - __ret = 1 - except SystemExit as _e: - raise _e - except: - traceback.print_exc() - error(misc.get_traceback_message(), exitcode=None) - __ret = 99 - - sys.exit(__ret) + misc.setlocale(locale.LC_ALL, "", error) + gettext.install("pkg", "/usr/share/locale") + misc.set_fd_limits(printer=error) + + if six.PY3: + # disable ResourceWarning: unclosed file + warnings.filterwarnings("ignore", category=ResourceWarning) + try: + __ret = main_func() + except (PipeError, KeyboardInterrupt): + # We don't want to display any messages here to prevent + # possible further broken pipe (EPIPE) errors. + __ret = 1 + except SystemExit as _e: + raise _e + except: + traceback.print_exc() + error(misc.get_traceback_message(), exitcode=None) + __ret = 99 + + sys.exit(__ret) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/publish/pkglint.py b/src/util/publish/pkglint.py index 28d95b2c0..fd6e909cd 100755 --- a/src/util/publish/pkglint.py +++ b/src/util/publish/pkglint.py @@ -24,7 +24,9 @@ # Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved. # -import pkg.site_paths; pkg.site_paths.init() +import pkg.site_paths + +pkg.site_paths.init() import codecs import logging import six @@ -51,301 +53,369 @@ logger = None + def error(message=""): - """Emit an error message prefixed by the command name. 
""" - misc.emsg("pkglint: {0}".format(message)) + """Emit an error message prefixed by the command name.""" + misc.emsg("pkglint: {0}".format(message)) + + if logger is not None: + logger.error(_("Error: {0}").format(message)) - if logger is not None: - logger.error(_("Error: {0}").format(message)) def msg(message): - logger.info(message) + logger.info(message) + def debug(message): - logger.debug(message) + logger.debug(message) + def main_func(): - """Start pkglint.""" - - global logger - - usage = \ - _("\n" - " %prog [-b build_no] [-c cache_dir] [-f file]\n" - " [-l uri ...] [-p regexp] [-r uri ...] [-v]\n" - " [-e extension_path ...]\n" - " manifest ...\n" - " %prog -L") - parser = OptionParser(usage=usage) - - parser.add_option("-b", dest="release", metavar="build_no", - help=_("build to use from lint and reference repositories")) - parser.add_option("-c", dest="cache", metavar="dir", - help=_("directory to use as a repository cache")) - parser.add_option("-f", dest="config", metavar="file", - help=_("specify an alternative pkglintrc file")) - parser.add_option("-l", dest="lint_uris", metavar="uri", - action="append", help=_("lint repository URI")) - parser.add_option("-L", dest="list_checks", - action="store_true", - help=_("list checks configured for this session and exit")) - parser.add_option("-p", dest="pattern", metavar="regexp", - help=_("pattern to match FMRIs in lint URI")) - parser.add_option("-r", dest="ref_uris", metavar="uri", - action="append", help=_("reference repository URI")) - parser.add_option("-e", dest="extension_path", metavar="dir", - action="append", help=_("extension_path")) - parser.add_option("-v", dest="verbose", action="store_true", - help=_("produce verbose output, overriding settings in pkglintrc")) - - opts, args = parser.parse_args(sys.argv[1:]) - - # without a cache option, we can't access repositories, so expect - # local manifests. - if not (opts.cache or opts.list_checks) and not args: - parser.error( - _("Required -c option missing, no local manifests provided." - )) - - pattern = opts.pattern - opts.ref_uris = _make_list(opts.ref_uris) - opts.lint_uris = _make_list(opts.lint_uris) - - logger = logging.getLogger("pkglint") - ch = logging.StreamHandler(sys.stdout) - - if opts.verbose: - logger.setLevel(logging.DEBUG) - ch.setLevel(logging.DEBUG) + """Start pkglint.""" + + global logger + + usage = _( + "\n" + " %prog [-b build_no] [-c cache_dir] [-f file]\n" + " [-l uri ...] [-p regexp] [-r uri ...] 
[-v]\n" + " [-e extension_path ...]\n" + " manifest ...\n" + " %prog -L" + ) + parser = OptionParser(usage=usage) + + parser.add_option( + "-b", + dest="release", + metavar="build_no", + help=_("build to use from lint and reference repositories"), + ) + parser.add_option( + "-c", + dest="cache", + metavar="dir", + help=_("directory to use as a repository cache"), + ) + parser.add_option( + "-f", + dest="config", + metavar="file", + help=_("specify an alternative pkglintrc file"), + ) + parser.add_option( + "-l", + dest="lint_uris", + metavar="uri", + action="append", + help=_("lint repository URI"), + ) + parser.add_option( + "-L", + dest="list_checks", + action="store_true", + help=_("list checks configured for this session and exit"), + ) + parser.add_option( + "-p", + dest="pattern", + metavar="regexp", + help=_("pattern to match FMRIs in lint URI"), + ) + parser.add_option( + "-r", + dest="ref_uris", + metavar="uri", + action="append", + help=_("reference repository URI"), + ) + parser.add_option( + "-e", + dest="extension_path", + metavar="dir", + action="append", + help=_("extension_path"), + ) + parser.add_option( + "-v", + dest="verbose", + action="store_true", + help=_("produce verbose output, overriding settings in pkglintrc"), + ) + + opts, args = parser.parse_args(sys.argv[1:]) + + # without a cache option, we can't access repositories, so expect + # local manifests. + if not (opts.cache or opts.list_checks) and not args: + parser.error( + _("Required -c option missing, no local manifests provided.") + ) + + pattern = opts.pattern + opts.ref_uris = _make_list(opts.ref_uris) + opts.lint_uris = _make_list(opts.lint_uris) + + logger = logging.getLogger("pkglint") + ch = logging.StreamHandler(sys.stdout) + + if opts.verbose: + logger.setLevel(logging.DEBUG) + ch.setLevel(logging.DEBUG) + + else: + logger.setLevel(logging.INFO) + ch.setLevel(logging.INFO) + + logger.addHandler(ch) + + lint_logger = log.PlainLogFormatter() + try: + if not opts.list_checks: + msg(_("Lint engine setup...")) + lint_engine = engine.LintEngine( + lint_logger, + config_file=opts.config, + verbose=opts.verbose, + extension_path=opts.extension_path, + ) + + if opts.list_checks: + list_checks( + lint_engine.checkers, + lint_engine.excluded_checkers, + opts.verbose, + ) + return EXIT_OK + + if (opts.lint_uris or opts.ref_uris) and not opts.cache: + parser.error( + _("Required -c option missing when using " "repositories.") + ) - else: - logger.setLevel(logging.INFO) - ch.setLevel(logging.INFO) + manifests = [] + if len(args) >= 1: + manifests = read_manifests(args, lint_logger) + if None in manifests or lint_logger.produced_lint_msgs(): + error(_("Fatal error in manifest - exiting.")) + return EXIT_OOPS + lint_engine.setup( + ref_uris=opts.ref_uris, + lint_uris=opts.lint_uris, + lint_manifests=manifests, + cache=opts.cache, + pattern=pattern, + release=opts.release, + ) - logger.addHandler(ch) + msg(_("Starting lint run...")) - lint_logger = log.PlainLogFormatter() - try: - if not opts.list_checks: - msg(_("Lint engine setup...")) - lint_engine = engine.LintEngine(lint_logger, - config_file=opts.config, verbose=opts.verbose, - extension_path=opts.extension_path) - - if opts.list_checks: - list_checks(lint_engine.checkers, - lint_engine.excluded_checkers, opts.verbose) - return EXIT_OK - - if (opts.lint_uris or opts.ref_uris) and not opts.cache: - parser.error( - _("Required -c option missing when using " - "repositories.")) - - manifests = [] - if len(args) >= 1: - manifests = read_manifests(args, lint_logger) 
- if None in manifests or \ - lint_logger.produced_lint_msgs(): - error(_("Fatal error in manifest - exiting.")) - return EXIT_OOPS - lint_engine.setup(ref_uris=opts.ref_uris, - lint_uris=opts.lint_uris, - lint_manifests=manifests, - cache=opts.cache, - pattern=pattern, - release=opts.release) - - msg(_("Starting lint run...")) - - lint_engine.execute() - lint_engine.teardown() - lint_logger.close() - - except engine.LintEngineSetupException as err: - # errors during setup are likely to be caused by bad - # input or configuration, not lint errors in manifests. - error(err) - return EXIT_BADOPT - - except engine.LintEngineException as err: - error(err) - return EXIT_OOPS + lint_engine.execute() + lint_engine.teardown() + lint_logger.close() + + except engine.LintEngineSetupException as err: + # errors during setup are likely to be caused by bad + # input or configuration, not lint errors in manifests. + error(err) + return EXIT_BADOPT + + except engine.LintEngineException as err: + error(err) + return EXIT_OOPS + + if lint_logger.produced_lint_msgs(): + return EXIT_OOPS + else: + return EXIT_OK - if lint_logger.produced_lint_msgs(): - return EXIT_OOPS - else: - return EXIT_OK def list_checks(checkers, exclude, verbose=False): - """Prints a human-readable version of configured checks.""" + """Prints a human-readable version of configured checks.""" - # used for justifying output - width = 28 + # used for justifying output + width = 28 - def get_method_desc(method, verbose): - if "pkglint_desc" in method.__dict__ and not verbose: - return method.pkglint_desc - else: - return "{0}.{1}.{2}".format(method.__self__.__class__.__module__, - method.__self__.__class__.__name__, - method.__func__.__name__) - - def emit(name, value): - msg("{0} {1}".format(name.ljust(width), value)) - - def print_list(items): - k = list(items.keys()) - k.sort() - for lint_id in k: - emit(lint_id, items[lint_id]) - - include_items = {} - exclude_items = {} - - for checker in checkers: - for m, lint_id in checker.included_checks: - include_items[lint_id] = get_method_desc(m, verbose) - - for checker in exclude: - for m, lint_id in checker.excluded_checks: - exclude_items[lint_id] = get_method_desc(m, verbose) - for m, lint_id in checker.included_checks: - exclude_items[lint_id] = get_method_desc(m, verbose) - - for checker in checkers: - for m, lint_id in checker.excluded_checks: - exclude_items[lint_id] = get_method_desc(m, verbose) - - if include_items or exclude_items: - if verbose: - emit(_("NAME"), _("METHOD")) - else: - emit(_("NAME"), _("DESCRIPTION")) - print_list(include_items) + def get_method_desc(method, verbose): + if "pkglint_desc" in method.__dict__ and not verbose: + return method.pkglint_desc + else: + return "{0}.{1}.{2}".format( + method.__self__.__class__.__module__, + method.__self__.__class__.__name__, + method.__func__.__name__, + ) + + def emit(name, value): + msg("{0} {1}".format(name.ljust(width), value)) + + def print_list(items): + k = list(items.keys()) + k.sort() + for lint_id in k: + emit(lint_id, items[lint_id]) + + include_items = {} + exclude_items = {} + + for checker in checkers: + for m, lint_id in checker.included_checks: + include_items[lint_id] = get_method_desc(m, verbose) + + for checker in exclude: + for m, lint_id in checker.excluded_checks: + exclude_items[lint_id] = get_method_desc(m, verbose) + for m, lint_id in checker.included_checks: + exclude_items[lint_id] = get_method_desc(m, verbose) + + for checker in checkers: + for m, lint_id in checker.excluded_checks: + 
exclude_items[lint_id] = get_method_desc(m, verbose) + + if include_items or exclude_items: + if verbose: + emit(_("NAME"), _("METHOD")) + else: + emit(_("NAME"), _("DESCRIPTION")) + print_list(include_items) - if exclude_items: - msg(_("\nExcluded checks:")) - print_list(exclude_items) + if exclude_items: + msg(_("\nExcluded checks:")) + print_list(exclude_items) -def read_manifests(names, lint_logger): - """Read a list of filenames, return a list of Manifest objects.""" - manifests = [] - for filename in names: - data = None - # borrowed code from publish.py - lines = [] # giant string of all input lines - linecnts = [] # tuples of starting line no., ending line no - linecounter = 0 # running total - try: - f = codecs.open(filename, "rb", "utf-8") - data = f.read() - except UnicodeDecodeError as e: - lint_logger.critical(_("Invalid file {file}: " - "manifest not encoded in UTF-8: {err}").format( - file=filename, err=e), - msgid="lint.manifest002") - continue - except IOError as e: - lint_logger.critical(_("Unable to read manifest file " - "{file}: {err}").format(file=filename, err=e), - msgid="lint.manifest001") - continue - lines.append(data) - linecnt = len(data.splitlines()) - linecnts.append((linecounter, linecounter + linecnt)) - linecounter += linecnt - - manifest = pkg.manifest.Manifest() - try: - manifest.set_content(content="\n".join(lines)) - except pkg.actions.ActionError as e: - lineno = e.lineno - for i, tup in enumerate(linecnts): - if lineno > tup[0] and lineno <= tup[1]: - lineno -= tup[0] - break - else: - lineno = "???" - - lint_logger.critical( - _("Error in {file} line: {ln}: {err} ").format( - file=filename, - ln=lineno, - err=str(e)), "lint.manifest002") - manifest = None - except InvalidPackageErrors as e: - lint_logger.critical( - _("Error in file {file}: {err}").format( - file=filename, - err=str(e)), "lint.manifest002") - manifest = None - - if manifest and "pkg.fmri" in manifest: - try: - manifest.fmri = \ - pkg.fmri.PkgFmri(manifest["pkg.fmri"]) - except fmri.IllegalFmri as e: - lint_logger.critical( - _("Error in file {file}: " - "{err}").format( - file=filename, err=e), - "lint.manifest002") - if manifest.fmri: - if not manifest.fmri.version: - lint_logger.critical( - _("Error in file {0}: " - "pkg.fmri does not include a " - "version string").format(filename), - "lint.manifest003") - else: - manifests.append(manifest) - - elif manifest: - lint_logger.critical( - _("Manifest {0} does not declare fmri.").format(filename), - "lint.manifest003") +def read_manifests(names, lint_logger): + """Read a list of filenames, return a list of Manifest objects.""" + + manifests = [] + for filename in names: + data = None + # borrowed code from publish.py + lines = [] # giant string of all input lines + linecnts = [] # tuples of starting line no., ending line no + linecounter = 0 # running total + try: + f = codecs.open(filename, "rb", "utf-8") + data = f.read() + except UnicodeDecodeError as e: + lint_logger.critical( + _( + "Invalid file {file}: " + "manifest not encoded in UTF-8: {err}" + ).format(file=filename, err=e), + msgid="lint.manifest002", + ) + continue + except IOError as e: + lint_logger.critical( + _("Unable to read manifest file " "{file}: {err}").format( + file=filename, err=e + ), + msgid="lint.manifest001", + ) + continue + lines.append(data) + linecnt = len(data.splitlines()) + linecnts.append((linecounter, linecounter + linecnt)) + linecounter += linecnt + + manifest = pkg.manifest.Manifest() + try: + manifest.set_content(content="\n".join(lines)) + 
except pkg.actions.ActionError as e: + lineno = e.lineno + for i, tup in enumerate(linecnts): + if lineno > tup[0] and lineno <= tup[1]: + lineno -= tup[0] + break + else: + lineno = "???" + + lint_logger.critical( + _("Error in {file} line: {ln}: {err} ").format( + file=filename, ln=lineno, err=str(e) + ), + "lint.manifest002", + ) + manifest = None + except InvalidPackageErrors as e: + lint_logger.critical( + _("Error in file {file}: {err}").format( + file=filename, err=str(e) + ), + "lint.manifest002", + ) + manifest = None + + if manifest and "pkg.fmri" in manifest: + try: + manifest.fmri = pkg.fmri.PkgFmri(manifest["pkg.fmri"]) + except fmri.IllegalFmri as e: + lint_logger.critical( + _("Error in file {file}: " "{err}").format( + file=filename, err=e + ), + "lint.manifest002", + ) + if manifest.fmri: + if not manifest.fmri.version: + lint_logger.critical( + _( + "Error in file {0}: " + "pkg.fmri does not include a " + "version string" + ).format(filename), + "lint.manifest003", + ) else: - manifests.append(None) - return manifests + manifests.append(manifest) + + elif manifest: + lint_logger.critical( + _("Manifest {0} does not declare fmri.").format(filename), + "lint.manifest003", + ) + else: + manifests.append(None) + return manifests + def _make_list(opt): - """Makes a list out of opt, and returns it.""" + """Makes a list out of opt, and returns it.""" - if isinstance(opt, list): - return opt - elif opt is None: - return [] - else: - return [opt] + if isinstance(opt, list): + return opt + elif opt is None: + return [] + else: + return [opt] if __name__ == "__main__": - misc.setlocale(locale.LC_ALL, "", error) - gettext.install("pkg", "/usr/share/locale") - misc.set_fd_limits(printer=error) - - if six.PY3: - # disable ResourceWarning: unclosed file - warnings.filterwarnings("ignore", category=ResourceWarning) - try: - __ret = main_func() - except (PipeError, KeyboardInterrupt): - # We don't want to display any messages here to prevent - # possible further broken pipe (EPIPE) errors. - __ret = EXIT_BADOPT - except SystemExit as __e: - __ret = __e.code - except (apx.InvalidDepotResponseException, tx.TransportFailures) as __e: - error(__e) - __ret = EXIT_BADOPT - except: - traceback.print_exc() - error(misc.get_traceback_message()) - __ret = 99 - - sys.exit(__ret) + misc.setlocale(locale.LC_ALL, "", error) + gettext.install("pkg", "/usr/share/locale") + misc.set_fd_limits(printer=error) + + if six.PY3: + # disable ResourceWarning: unclosed file + warnings.filterwarnings("ignore", category=ResourceWarning) + try: + __ret = main_func() + except (PipeError, KeyboardInterrupt): + # We don't want to display any messages here to prevent + # possible further broken pipe (EPIPE) errors. 
+ __ret = EXIT_BADOPT + except SystemExit as __e: + __ret = __e.code + except (apx.InvalidDepotResponseException, tx.TransportFailures) as __e: + error(__e) + __ret = EXIT_BADOPT + except: + traceback.print_exc() + error(misc.get_traceback_message()) + __ret = 99 + + sys.exit(__ret) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/publish/pkgmerge.py b/src/util/publish/pkgmerge.py index aa7b33ca7..086ba1097 100755 --- a/src/util/publish/pkgmerge.py +++ b/src/util/publish/pkgmerge.py @@ -24,86 +24,94 @@ # from __future__ import print_function -import pkg.site_paths; pkg.site_paths.init() +import pkg.site_paths + +pkg.site_paths.init() try: - import calendar - import collections - import getopt - import gettext - import itertools - import locale - import os - import shutil - import six - import sys - import tempfile - import traceback - - import pkg.actions as actions - import pkg.fmri - import pkg.client.api_errors as apx - import pkg.client.progress as progress - import pkg.client.publisher as publisher - import pkg.client.transport.transport as transport - import pkg.manifest as manifest - import pkg.misc as misc - import pkg.publish.transaction as trans - - from functools import reduce - from pkg.misc import PipeError, emsg, msg - from six.moves.urllib.parse import quote - from pkg.client.pkgdefs import (EXIT_OK, EXIT_OOPS, EXIT_BADOPT, - EXIT_PARTIAL) + import calendar + import collections + import getopt + import gettext + import itertools + import locale + import os + import shutil + import six + import sys + import tempfile + import traceback + + import pkg.actions as actions + import pkg.fmri + import pkg.client.api_errors as apx + import pkg.client.progress as progress + import pkg.client.publisher as publisher + import pkg.client.transport.transport as transport + import pkg.manifest as manifest + import pkg.misc as misc + import pkg.publish.transaction as trans + + from functools import reduce + from pkg.misc import PipeError, emsg, msg + from six.moves.urllib.parse import quote + from pkg.client.pkgdefs import EXIT_OK, EXIT_OOPS, EXIT_BADOPT, EXIT_PARTIAL except KeyboardInterrupt: - import sys - sys.exit(EXIT_OOPS) + import sys + + sys.exit(EXIT_OOPS) + class PkgmergeException(Exception): - """An exception raised if something goes wrong during the merging - process.""" - pass + """An exception raised if something goes wrong during the merging + process.""" + pass -catalog_dict = {} # hash table of catalogs by source uri -fmri_cache = {} + +catalog_dict = {} # hash table of catalogs by source uri +fmri_cache = {} manifest_cache = {} -null_manifest = manifest.Manifest() -tmpdir = None -dry_run = False -xport = None -dest_xport = None -pubs = set() -target_pub = None +null_manifest = manifest.Manifest() +tmpdir = None +dry_run = False +xport = None +dest_xport = None +pubs = set() +target_pub = None + def cleanup(): - """To be called at program finish.""" + """To be called at program finish.""" - if tmpdir: - shutil.rmtree(tmpdir, ignore_errors=True) + if tmpdir: + shutil.rmtree(tmpdir, ignore_errors=True) + + if dry_run: + return + + # Attempt to kick off a refresh of target repository for each + # publisher before exiting. + for pfx in pubs: + target_pub.prefix = pfx + try: + dest_xport.publish_refresh_packages(target_pub) + except apx.TransportError: + # If this fails, ignore it as this was a last + # ditch attempt anyway. 
+ break - if dry_run: - return - - # Attempt to kick off a refresh of target repository for each - # publisher before exiting. - for pfx in pubs: - target_pub.prefix = pfx - try: - dest_xport.publish_refresh_packages(target_pub) - except apx.TransportError: - # If this fails, ignore it as this was a last - # ditch attempt anyway. - break def usage(errmsg="", exitcode=2): - """Emit a usage message and optionally prefix it with a more specific - error message. Causes program to exit.""" + """Emit a usage message and optionally prefix it with a more specific + error message. Causes program to exit.""" - if errmsg: - emsg("pkgmerge: {0}".format(errmsg)) + if errmsg: + emsg("pkgmerge: {0}".format(errmsg)) - msg(_("""\ + msg( + _( + """\ Usage: pkgmerge [-n] -d dest_repo -s variant=value[,...],src_repo ... [-p publisher_prefix ... ] [pkg_fmri_pattern ...] @@ -139,864 +147,912 @@ def usage(errmsg="", exitcode=2): TMPDIR, TEMP, TMP The absolute path of the directory where temporary data should be stored during program execution. -""")) +""" + ) + ) + + sys.exit(exitcode) - sys.exit(exitcode) def error(text, exitcode=EXIT_OOPS): - """Emit an error message prefixed by the command name """ + """Emit an error message prefixed by the command name""" - emsg("pkgmerge: {0}".format(text)) + emsg("pkgmerge: {0}".format(text)) + + if exitcode != None: + sys.exit(exitcode) - if exitcode != None: - sys.exit(exitcode) def get_tracker(): - try: - progresstracker = \ - progress.FancyUNIXProgressTracker() - except progress.ProgressTrackerException: - progresstracker = progress.CommandLineProgressTracker() - return progresstracker + try: + progresstracker = progress.FancyUNIXProgressTracker() + except progress.ProgressTrackerException: + progresstracker = progress.CommandLineProgressTracker() + return progresstracker + def load_catalog(repouri, pub): - """Load catalog from specified uri""" - # Pull catalog only from this host - pub.repository.origins = [repouri] - pub.refresh(full_refresh=True, immediate=True) - - catalog_dict[repouri.uri] = dict( - (name, [ - entry - for entry in pub.catalog.fmris_by_version(name) - ]) - for name in pub.catalog.names() - ) + """Load catalog from specified uri""" + # Pull catalog only from this host + pub.repository.origins = [repouri] + pub.refresh(full_refresh=True, immediate=True) + + catalog_dict[repouri.uri] = dict( + (name, [entry for entry in pub.catalog.fmris_by_version(name)]) + for name in pub.catalog.names() + ) + + # Discard catalog. + pub.remove_meta_root() + # XXX At the moment, the only way to force the publisher object to + # discard its copy of a catalog is to set repository. + pub.repository = pub.repository - # Discard catalog. - pub.remove_meta_root() - # XXX At the moment, the only way to force the publisher object to - # discard its copy of a catalog is to set repository. - pub.repository = pub.repository def get_all_pkg_names(repouri): - return list(catalog_dict[repouri.uri].keys()) + return list(catalog_dict[repouri.uri].keys()) + def get_manifest(repouri, fmri): - """Fetch the manifest for package-fmri 'fmri' from the source - in 'repouri'... return as Manifest object.""" + """Fetch the manifest for package-fmri 'fmri' from the source + in 'repouri'... 
return as Manifest object.""" + + # support null manifests to keep lists ordered for merge + if not fmri: + return null_manifest - # support null manifests to keep lists ordered for merge - if not fmri: - return null_manifest + mfst_str = xport.get_manifest(fmri, pub=repouri, content_only=True) + m = manifest.Manifest(fmri) + m.set_content(content=mfst_str) + return m - mfst_str = xport.get_manifest(fmri, pub=repouri, content_only=True) - m = manifest.Manifest(fmri) - m.set_content(content=mfst_str) - return m def main_func(): - global dry_run, tmpdir, xport, dest_xport, target_pub + global dry_run, tmpdir, xport, dest_xport, target_pub + + dest_repo = None + source_list = [] + variant_list = [] + pub_list = [] + use_pub_list = False + + try: + opts, pargs = getopt.getopt(sys.argv[1:], "d:np:s:?", ["help"]) + for opt, arg in opts: + if opt == "-d": + dest_repo = misc.parse_uri(arg) + elif opt == "-n": + dry_run = True + elif opt == "-s": + s = arg.split(",") + if len(s) < 2: + usage("-s option must specify " "variant=value,repo_uri") + + # All but last part should be variant. + src_vars = {} + for v in s[:-1]: + try: + vname, vval = v.split("=") + except ValueError: + usage( + "-s option must specify " "variant=value,repo_uri" + ) - dest_repo = None - source_list = [] - variant_list = [] - pub_list = [] - use_pub_list = False + if not vname.startswith("variant."): + vname = "variant.{0}".format(vname) + src_vars[vname] = vval - try: - opts, pargs = getopt.getopt(sys.argv[1:], "d:np:s:?", - ["help"]) - for opt, arg in opts: - if opt == "-d": - dest_repo = misc.parse_uri(arg) - elif opt == "-n": - dry_run = True - elif opt == "-s": - s = arg.split(",") - if len(s) < 2: - usage("-s option must specify " - "variant=value,repo_uri") - - # All but last part should be variant. - src_vars = {} - for v in s[:-1]: - try: - vname, vval = v.split("=") - except ValueError: - usage("-s option must specify " - "variant=value,repo_uri") - - if not vname.startswith("variant."): - vname = "variant.{0}".format(vname) - src_vars[vname] = vval - - variant_list.append(src_vars) - source_list.append(publisher.RepositoryURI( - misc.parse_uri(s[-1]))) - elif opt == "-p": - use_pub_list = True - pub_list.append(arg) - - if opt in ("--help", "-?"): - usage(exitcode=0) - except getopt.GetoptError as e: - usage(_("illegal option -- {0}").format(e.opt)) - - if not source_list: - usage(_("At least one variant name, value, and package source " - "must be provided using -s.")) - - if not dest_repo: - usage(_("A destination package repository must be provided " - "using -d.")) - - # Determine the unique set of variants across all sources. - variants = set() - vcombos = collections.defaultdict(set) - for src_vars in variant_list: - for v, vval in six.iteritems(src_vars): - variants.add(v) - vcombos[v].add((v, vval)) - - # merge_fmris() expects this to be a list. Sort it to make sure - # combo is determinstic in the later construction. - variants = sorted(variants, reverse=True) - - # Require that the user specified the same variants for all sources. - for i, src_vars in enumerate(variant_list): - missing = set(v for v in variants if v not in variant_list[i]) - if missing: - missing = ", ".join(missing) - source = source_list[i] - usage(_("Source {source} missing values for " - "variants: {missing}").format(**locals())) - - # Require that each unique variant combination has a source. 
- for combo in itertools.product(*vcombos.values()): - found = False - for i, src in enumerate(source_list): - for vname, vval in combo: - if variant_list[i].get(vname, - None) != vval: - found = False - break - else: - found = True - break - - if not found: - combo = " ".join( - "{0}={1}".format(vname, vval) - for vname, vval in combo - ) - usage(_("No source was specified for variant " - "combination {combo}.").format(**locals())) - - # initialize transport - # we use a single endpoint for now, since the transport code - # uses publisher as a unique key... so we just flop the repo - # list as needed to access the different catalogs/manifests/files. - temp_root = misc.config_temp_root() - - tmpdir = tempfile.mkdtemp(dir=temp_root, prefix="pkgmerge") - xport, xport_cfg = transport.setup_transport() - xport_cfg.incoming_root = tmpdir - - # we don't use the publisher returned by setup_publisher, as that only - # returns one of the publishers in source_list. Instead we use - # xport_cfg to access all publishers. - transport.setup_publisher(source_list, - "pkgmerge", xport, xport_cfg, remote_prefix=True) - cat_dir = tempfile.mkdtemp(dir=tmpdir) - - # we must have at least one matching publisher if -p was used. - known_pubs = set([pub.prefix for pub in xport_cfg.gen_publishers()]) - if pub_list and len(set(pub_list).intersection(known_pubs)) == 0: - error(_("no publishers from source repositories match " - "the given -p options.")) - - errors = set() - tracker = get_tracker() - - # iterate over all publishers in our source repositories. If errors - # are encountered for a given publisher, we accumulate those, and - # skip to the next publisher. - for pub in xport_cfg.gen_publishers(): - - if use_pub_list: - if pub.prefix not in pub_list: - continue - else: - # remove publishers from pub_list as we go, so - # that when we're finished, any remaining - # publishers in pub_list suggest superfluous - # -p options, which will cause us to exit with - # an error. - pub_list.remove(pub.prefix) - - pub.meta_root = cat_dir - pub.transport = xport - - # Use separate transport for destination repository in case - # source and destination have identical publisher configuration. - dest_xport, dest_xport_cfg = transport.setup_transport() - dest_xport_cfg.incoming_root = tmpdir - - # retrieve catalogs for all specified repositories - for s in source_list: - load_catalog(s, pub) - - # determine the list of packages we'll be processing - if not pargs: - # use the latest versions and merge everything - fmri_arguments = list(set( - name - for s in source_list - for name in get_all_pkg_names(s) - )) - exclude_args = [] - else: - fmri_arguments = [ - f - for f in pargs - if not f.startswith("!") - ] + variant_list.append(src_vars) + source_list.append( + publisher.RepositoryURI(misc.parse_uri(s[-1])) + ) + elif opt == "-p": + use_pub_list = True + pub_list.append(arg) + + if opt in ("--help", "-?"): + usage(exitcode=0) + except getopt.GetoptError as e: + usage(_("illegal option -- {0}").format(e.opt)) + + if not source_list: + usage( + _( + "At least one variant name, value, and package source " + "must be provided using -s." 
+ ) + ) - exclude_args = [ - f[1:] - for f in pargs - if f.startswith("!") - ] + if not dest_repo: + usage( + _("A destination package repository must be provided " "using -d.") + ) - # build fmris to be merged - masterlist = [ - build_merge_list(fmri_arguments, exclude_args, - catalog_dict[s.uri]) - for s in source_list - ] - - # check for unmatched patterns - in_none = reduce(lambda x, y: x & y, - (set(u) for d, u in masterlist)) - if in_none: - errors.add( - _("The following pattern(s) did not match any " - "packages in any of the specified repositories for " - "publisher {pub_name}:" - "\n{patterns}").format(patterns="\n".join(in_none), - pub_name=pub.prefix)) - continue + # Determine the unique set of variants across all sources. + variants = set() + vcombos = collections.defaultdict(set) + for src_vars in variant_list: + for v, vval in six.iteritems(src_vars): + variants.add(v) + vcombos[v].add((v, vval)) + + # merge_fmris() expects this to be a list. Sort it to make sure + # combo is determinstic in the later construction. + variants = sorted(variants, reverse=True) + + # Require that the user specified the same variants for all sources. + for i, src_vars in enumerate(variant_list): + missing = set(v for v in variants if v not in variant_list[i]) + if missing: + missing = ", ".join(missing) + source = source_list[i] + usage( + _( + "Source {source} missing values for " "variants: {missing}" + ).format(**locals()) + ) + + # Require that each unique variant combination has a source. + for combo in itertools.product(*vcombos.values()): + found = False + for i, src in enumerate(source_list): + for vname, vval in combo: + if variant_list[i].get(vname, None) != vval: + found = False + break + else: + found = True + break + + if not found: + combo = " ".join( + "{0}={1}".format(vname, vval) for vname, vval in combo + ) + usage( + _( + "No source was specified for variant " + "combination {combo}." + ).format(**locals()) + ) + + # initialize transport + # we use a single endpoint for now, since the transport code + # uses publisher as a unique key... so we just flop the repo + # list as needed to access the different catalogs/manifests/files. + temp_root = misc.config_temp_root() + + tmpdir = tempfile.mkdtemp(dir=temp_root, prefix="pkgmerge") + xport, xport_cfg = transport.setup_transport() + xport_cfg.incoming_root = tmpdir + + # we don't use the publisher returned by setup_publisher, as that only + # returns one of the publishers in source_list. Instead we use + # xport_cfg to access all publishers. + transport.setup_publisher( + source_list, "pkgmerge", xport, xport_cfg, remote_prefix=True + ) + cat_dir = tempfile.mkdtemp(dir=tmpdir) + + # we must have at least one matching publisher if -p was used. + known_pubs = set([pub.prefix for pub in xport_cfg.gen_publishers()]) + if pub_list and len(set(pub_list).intersection(known_pubs)) == 0: + error( + _( + "no publishers from source repositories match " + "the given -p options." + ) + ) - # generate set of all package names to be processed, and dict - # of lists indexed by order in source_list; if that repo has no - # fmri for this pkg then use None. 
- allpkgs = set(name for d, u in masterlist for name in d) - - processdict = {} - for p in allpkgs: - for d, u in masterlist: - processdict.setdefault(p, []).append( - d.setdefault(p, None)) - - # check to make sure all fmris are at same version modulo - # timestamp - for entry in processdict: - if len(set([ - str(a).rsplit(":")[0] - for a in processdict[entry] - if a is not None - ])) > 1: - errors.add( - _("fmris matching the following patterns do" - " not have matching versions across all " - "repositories for publisher {pubs}: " - "{patterns}").format(pub=pub.prefix, - patterns=processdict[entry])) - continue - - target_pub = transport.setup_publisher(dest_repo, - pub.prefix, dest_xport, dest_xport_cfg, - remote_prefix=True) - - tracker.republish_set_goal(len(processdict), 0, 0) - # republish packages for this publisher. If we encounter any - # publication errors, we move on to the next publisher. - try: - pkg_tmpdir = tempfile.mkdtemp(dir=tmpdir) - republish_packages(pub, target_pub, - processdict, source_list, variant_list, variants, - tracker, xport, dest_repo, dest_xport, pkg_tmpdir, - dry_run=dry_run) - except (trans.TransactionError, PkgmergeException) as e: - errors.add(str(e)) - tracker.reset() - continue - finally: - # if we're handling an exception, this still gets called - # in spite of the 'continue' that the handler ends with. - if os.path.exists(pkg_tmpdir): - shutil.rmtree(pkg_tmpdir) - - tracker.republish_done(dryrun=dry_run) - tracker.reset() - - # If -p options were supplied, we should have processed all of them - # by now. Remaining entries suggest -p options that were not merged. - if use_pub_list and pub_list: - errors.add(_("the following publishers were not found in " - "source repositories: {0}").format(" ".join(pub_list))) - - # If we have encountered errors for some publishers, print them now - # and exit. - tracker.flush() - for message in errors: - error(message, exitcode=None) - if errors: - exit(EXIT_OOPS) - - return EXIT_OK - -def republish_packages(pub, target_pub, processdict, source_list, variant_list, - variants, tracker, xport, dest_repo, dest_xport, pkg_tmpdir, - dry_run=False): - """Republish packages for publisher pub to dest_repo. - - If we try to republish a package that we have already published, - an exception is raise. - - pub the publisher from source_list that we are republishing - target_pub the destination publisher - processdict a dict indexed by package name of the pkgs to merge - source_list a list of source respositories - variant_list a list of dicts containing variant names/values - variants the unique set of variants across all sources. - tracker a progress tracker - xport the transport handling our source repositories - dest_repo our destination repository - dest_xport the transport handling our destination repository - pkg_tmpdir a temporary dir used when downloading pkg content - which may be deleted and recreated by this method. - - dry_run True if we should not actually publish - """ - - def get_basename(pfmri): - open_time = pfmri.get_timestamp() - return "{0:d}_{0}".format( - calendar.timegm(open_time.utctimetuple()), - quote(str(pfmri), "")) + errors = set() + tracker = get_tracker() + + # iterate over all publishers in our source repositories. If errors + # are encountered for a given publisher, we accumulate those, and + # skip to the next publisher. 
+ for pub in xport_cfg.gen_publishers(): + if use_pub_list: + if pub.prefix not in pub_list: + continue + else: + # remove publishers from pub_list as we go, so + # that when we're finished, any remaining + # publishers in pub_list suggest superfluous + # -p options, which will cause us to exit with + # an error. + pub_list.remove(pub.prefix) + + pub.meta_root = cat_dir + pub.transport = xport + + # Use separate transport for destination repository in case + # source and destination have identical publisher configuration. + dest_xport, dest_xport_cfg = transport.setup_transport() + dest_xport_cfg.incoming_root = tmpdir + + # retrieve catalogs for all specified repositories + for s in source_list: + load_catalog(s, pub) + + # determine the list of packages we'll be processing + if not pargs: + # use the latest versions and merge everything + fmri_arguments = list( + set(name for s in source_list for name in get_all_pkg_names(s)) + ) + exclude_args = [] + else: + fmri_arguments = [f for f in pargs if not f.startswith("!")] + + exclude_args = [f[1:] for f in pargs if f.startswith("!")] + + # build fmris to be merged + masterlist = [ + build_merge_list(fmri_arguments, exclude_args, catalog_dict[s.uri]) + for s in source_list + ] + # check for unmatched patterns + in_none = reduce(lambda x, y: x & y, (set(u) for d, u in masterlist)) + if in_none: + errors.add( + _( + "The following pattern(s) did not match any " + "packages in any of the specified repositories for " + "publisher {pub_name}:" + "\n{patterns}" + ).format(patterns="\n".join(in_none), pub_name=pub.prefix) + ) + continue + + # generate set of all package names to be processed, and dict + # of lists indexed by order in source_list; if that repo has no + # fmri for this pkg then use None. + allpkgs = set(name for d, u in masterlist for name in d) + + processdict = {} + for p in allpkgs: + for d, u in masterlist: + processdict.setdefault(p, []).append(d.setdefault(p, None)) + + # check to make sure all fmris are at same version modulo + # timestamp for entry in processdict: - man, retrievals = merge_fmris(source_list, - processdict[entry], variant_list, variants) - - # Determine total bytes to retrieve for this package; this must - # be done using the retrievals dict since they are coalesced by - # hash. - getbytes = sum( - misc.get_pkg_otw_size(a) - for i, uri in enumerate(source_list) - for a in retrievals[i] + if ( + len( + set( + [ + str(a).rsplit(":")[0] + for a in processdict[entry] + if a is not None + ] + ) + ) + > 1 + ): + errors.add( + _( + "fmris matching the following patterns do" + " not have matching versions across all " + "repositories for publisher {pubs}: " + "{patterns}" + ).format(pub=pub.prefix, patterns=processdict[entry]) ) + continue + + target_pub = transport.setup_publisher( + dest_repo, + pub.prefix, + dest_xport, + dest_xport_cfg, + remote_prefix=True, + ) - # Determine total bytes to send for this package; this must be - # done using the manifest since retrievals are coalesced based - # on hash, but sends are not. - f = man.fmri - target_pub.prefix = f.publisher - sendbytes = dest_xport.get_transfer_size(target_pub, - man.gen_actions()) + tracker.republish_set_goal(len(processdict), 0, 0) + # republish packages for this publisher. If we encounter any + # publication errors, we move on to the next publisher. 
+ try: + pkg_tmpdir = tempfile.mkdtemp(dir=tmpdir) + republish_packages( + pub, + target_pub, + processdict, + source_list, + variant_list, + variants, + tracker, + xport, + dest_repo, + dest_xport, + pkg_tmpdir, + dry_run=dry_run, + ) + except (trans.TransactionError, PkgmergeException) as e: + errors.add(str(e)) + tracker.reset() + continue + finally: + # if we're handling an exception, this still gets called + # in spite of the 'continue' that the handler ends with. + if os.path.exists(pkg_tmpdir): + shutil.rmtree(pkg_tmpdir) - tracker.republish_start_pkg(f, getbytes=getbytes, - sendbytes=sendbytes) + tracker.republish_done(dryrun=dry_run) + tracker.reset() + + # If -p options were supplied, we should have processed all of them + # by now. Remaining entries suggest -p options that were not merged. + if use_pub_list and pub_list: + errors.add( + _( + "the following publishers were not found in " + "source repositories: {0}" + ).format(" ".join(pub_list)) + ) - if dry_run: - # Dry-run; attempt a merge of everything but don't - # write any data or publish packages. - continue + # If we have encountered errors for some publishers, print them now + # and exit. + tracker.flush() + for message in errors: + error(message, exitcode=None) + if errors: + exit(EXIT_OOPS) + + return EXIT_OK + + +def republish_packages( + pub, + target_pub, + processdict, + source_list, + variant_list, + variants, + tracker, + xport, + dest_repo, + dest_xport, + pkg_tmpdir, + dry_run=False, +): + """Republish packages for publisher pub to dest_repo. + + If we try to republish a package that we have already published, + an exception is raise. + + pub the publisher from source_list that we are republishing + target_pub the destination publisher + processdict a dict indexed by package name of the pkgs to merge + source_list a list of source respositories + variant_list a list of dicts containing variant names/values + variants the unique set of variants across all sources. + tracker a progress tracker + xport the transport handling our source repositories + dest_repo our destination repository + dest_xport the transport handling our destination repository + pkg_tmpdir a temporary dir used when downloading pkg content + which may be deleted and recreated by this method. + + dry_run True if we should not actually publish + """ + + def get_basename(pfmri): + open_time = pfmri.get_timestamp() + return "{0:d}_{0}".format( + calendar.timegm(open_time.utctimetuple()), quote(str(pfmri), "") + ) + + for entry in processdict: + man, retrievals = merge_fmris( + source_list, processdict[entry], variant_list, variants + ) + + # Determine total bytes to retrieve for this package; this must + # be done using the retrievals dict since they are coalesced by + # hash. + getbytes = sum( + misc.get_pkg_otw_size(a) + for i, uri in enumerate(source_list) + for a in retrievals[i] + ) + + # Determine total bytes to send for this package; this must be + # done using the manifest since retrievals are coalesced based + # on hash, but sends are not. + f = man.fmri + target_pub.prefix = f.publisher + sendbytes = dest_xport.get_transfer_size(target_pub, man.gen_actions()) + + tracker.republish_start_pkg(f, getbytes=getbytes, sendbytes=sendbytes) + + if dry_run: + # Dry-run; attempt a merge of everything but don't + # write any data or publish packages. + continue + + # Retrieve package data from each package source. 
+ for i, uri in enumerate(source_list): + pub.repository.origins = [uri] + mfile = xport.multi_file_ni( + pub, pkg_tmpdir, decompress=True, progtrack=tracker + ) + for a in retrievals[i]: + mfile.add_action(a) + mfile.wait_files() + + trans_id = get_basename(f) + pkg_name = f.get_fmri() + pubs.add(target_pub.prefix) + # Publish merged package. + t = trans.Transaction( + dest_repo, + pkg_name=pkg_name, + trans_id=trans_id, + xport=dest_xport, + pub=target_pub, + progtrack=tracker, + ) + + # Remove any previous failed attempt to + # to republish this package. + try: + t.close(abandon=True) + except: + # It might not exist already. + pass + + t.open() + for a in man.gen_actions(): + if a.name == "set" and a.attrs["name"] == "pkg.fmri": + # To be consistent with the + # server, the fmri can't be + # added to the manifest. + continue + + if hasattr(a, "hash"): + fname = os.path.join(pkg_tmpdir, a.hash) + a.data = lambda: open(fname, "rb") + t.add(a) + + # Always defer catalog update. + t.close(add_to_catalog=False) + + # Done with this package. + tracker.republish_end_pkg(f) + + # Dump retrieved package data after each republication and + # recreate the directory for the next package. + shutil.rmtree(pkg_tmpdir) + os.mkdir(pkg_tmpdir) - # Retrieve package data from each package source. - for i, uri in enumerate(source_list): - pub.repository.origins = [uri] - mfile = xport.multi_file_ni(pub, pkg_tmpdir, - decompress=True, progtrack=tracker) - for a in retrievals[i]: - mfile.add_action(a) - mfile.wait_files() - - trans_id = get_basename(f) - pkg_name = f.get_fmri() - pubs.add(target_pub.prefix) - # Publish merged package. - t = trans.Transaction(dest_repo, - pkg_name=pkg_name, trans_id=trans_id, - xport=dest_xport, pub=target_pub, - progtrack=tracker) - - # Remove any previous failed attempt to - # to republish this package. - try: - t.close(abandon=True) - except: - # It might not exist already. - pass - - t.open() - for a in man.gen_actions(): - if (a.name == "set" and - a.attrs["name"] == "pkg.fmri"): - # To be consistent with the - # server, the fmri can't be - # added to the manifest. - continue - - if hasattr(a, "hash"): - fname = os.path.join(pkg_tmpdir, - a.hash) - a.data = lambda: open( - fname, "rb") - t.add(a) - - # Always defer catalog update. - t.close(add_to_catalog=False) - - # Done with this package. - tracker.republish_end_pkg(f) - - # Dump retrieved package data after each republication and - # recreate the directory for the next package. - shutil.rmtree(pkg_tmpdir) - os.mkdir(pkg_tmpdir) def merge_fmris(source_list, fmri_list, variant_list, variants): - """Merge a list of manifests representing multiple variants, - returning the merged manifest and a list of lists of actions to - retrieve from each source""" - - # Merge each variant one at a time. - merged = {} - # where to find files... - hash_source = {} - - for i, variant in enumerate(variants): - # Build the unique list of remaining variant combinations to - # use for merging this variant. - combos = set( - tuple( - (vname, src_vars[vname]) - for vname in variants[i + 1:] - ) - for src_vars in variant_list - ) + """Merge a list of manifests representing multiple variants, + returning the merged manifest and a list of lists of actions to + retrieve from each source""" + + # Merge each variant one at a time. + merged = {} + # where to find files... + hash_source = {} + + for i, variant in enumerate(variants): + # Build the unique list of remaining variant combinations to + # use for merging this variant. 
+ combos = set( + tuple((vname, src_vars[vname]) for vname in variants[i + 1 :]) + for src_vars in variant_list + ) + + if not combos: + # If there are no other variants to combine, then simply + # combine all manifests. + combos = [()] + + # Perform the variant merge for each unique combination of + # remaining variants. For example, a pkgmerge of: + # -s arch=sparc,debug=false,flavor=32,... + # -s arch=sparc,debug=false,flavor=64,... + # -s arch=sparc,debug=true,flavor=32,... + # -s arch=sparc,debug=true,flavor=64,... + # -s arch=i386,debug=false,flavor=32,... + # -s arch=i386,debug=false,flavor=64,... + # -s arch=i386,debug=true,flavor=32,... + # -s arch=i386,debug=true,flavor=64,... + # + # ...would produce the following combinations for each variant: + # variant.arch + # debug=false, flavor=32 + # debug=false, flavor=64 + # debug=true, flavor=32 + # debug=true, flavor=64 + # variant.debug + # flavor=32 + # flavor=64 + # variant.flavor + # + for combo in combos: + # Build the list of sources, fmris, and variant values + # involved in this particular combo merge. + slist = [] + flist = [] + vlist = [] + sindex = [] + new_fmri = None + for j, src in enumerate(source_list): + if combo: + # If filtering on a specific combination + # then skip this source if any of the + # combination parameters don't match. + skip = False + for vname, vval in combo: + if variant_list[j].get(vname, None) != vval: + skip = True + break + + if skip: + continue + + # Skip this source if it doesn't have a matching + # package to merge, or if it has already been + # merged with another package. + pfmri = fmri_list[j] + if not pfmri or merged.get(id(pfmri), None) == null_manifest: + continue + + # The newest FMRI in the set of manifests being + # merged will be used as the new FMRI of the + # merged package. + if new_fmri is None or pfmri.version > new_fmri.version: + new_fmri = pfmri + + sindex.append(j) + slist.append(src) + flist.append(pfmri) + vlist.append(variant_list[j][variant]) + + if not flist: + # Nothing to merge for this combination. + continue + + # Build the list of manifests to be merged. + mlist = [] + for j, s, f in zip(sindex, slist, flist): + if id(f) in merged: + # Manifest already merged before, use + # the merged version. + m = merged[id(f)] + else: + # Manifest not yet merged, retrieve + # from source; record those w/ payloads + # so we know from where to get them.. + m = get_manifest(s, f) + for a in m.gen_actions(): + if a.has_payload: + hash_source.setdefault(a.hash, j) + mlist.append(m) + + m = __merge_fmris(new_fmri, mlist, flist, vlist, variant) + + for f in flist: + if id(f) == id(new_fmri): + # This FMRI was used for the merged + # manifest; any future merges should + # use the merged manifest for this + # FMRI. + merged[id(f)] = m + else: + # This package has been merged with + # another so shouldn't be retrieved + # or merged again. + merged[id(f)] = null_manifest - if not combos: - # If there are no other variants to combine, then simply - # combine all manifests. - combos = [()] - - # Perform the variant merge for each unique combination of - # remaining variants. For example, a pkgmerge of: - # -s arch=sparc,debug=false,flavor=32,... - # -s arch=sparc,debug=false,flavor=64,... - # -s arch=sparc,debug=true,flavor=32,... - # -s arch=sparc,debug=true,flavor=64,... - # -s arch=i386,debug=false,flavor=32,... - # -s arch=i386,debug=false,flavor=64,... - # -s arch=i386,debug=true,flavor=32,... - # -s arch=i386,debug=true,flavor=64,... 
- # - # ...would produce the following combinations for each variant: - # variant.arch - # debug=false, flavor=32 - # debug=false, flavor=64 - # debug=true, flavor=32 - # debug=true, flavor=64 - # variant.debug - # flavor=32 - # flavor=64 - # variant.flavor - # - for combo in combos: - # Build the list of sources, fmris, and variant values - # involved in this particular combo merge. - slist = [] - flist = [] - vlist = [] - sindex = [] - new_fmri = None - for j, src in enumerate(source_list): - if combo: - # If filtering on a specific combination - # then skip this source if any of the - # combination parameters don't match. - skip = False - for vname, vval in combo: - if variant_list[j].get(vname, - None) != vval: - skip = True - break - - if skip: - continue - - # Skip this source if it doesn't have a matching - # package to merge, or if it has already been - # merged with another package. - pfmri = fmri_list[j] - if not pfmri or \ - merged.get(id(pfmri), None) == null_manifest: - continue - - # The newest FMRI in the set of manifests being - # merged will be used as the new FMRI of the - # merged package. - if new_fmri is None or pfmri.version > new_fmri.version: - new_fmri = pfmri - - sindex.append(j) - slist.append(src) - flist.append(pfmri) - vlist.append(variant_list[j][variant]) - - if not flist: - # Nothing to merge for this combination. - continue - - # Build the list of manifests to be merged. - mlist = [] - for j, s, f in zip(sindex, slist, flist): - if id(f) in merged: - # Manifest already merged before, use - # the merged version. - m = merged[id(f)] - else: - # Manifest not yet merged, retrieve - # from source; record those w/ payloads - # so we know from where to get them.. - m = get_manifest(s, f) - for a in m.gen_actions(): - if a.has_payload: - hash_source.setdefault(a.hash, j) - mlist.append(m) - - m = __merge_fmris(new_fmri, mlist, flist, vlist, - variant) - - for f in flist: - if id(f) == id(new_fmri): - # This FMRI was used for the merged - # manifest; any future merges should - # use the merged manifest for this - # FMRI. - merged[id(f)] = m - else: - # This package has been merged with - # another so shouldn't be retrieved - # or merged again. - merged[id(f)] = null_manifest - - # Merge process should have resulted in a single non-null manifest. - m = [v for v in merged.values() if v != null_manifest] - assert len(m) == 1 - m = m[0] - - # Finally, build a list of actions to retrieve based on position in - # source_list. - - retrievals = [list() for i in source_list] - - for a in m.gen_actions(): - if a.has_payload: - source = hash_source.pop(a.hash, None) - if source is not None: - retrievals[source].append(a) - return m, retrievals + # Merge process should have resulted in a single non-null manifest. + m = [v for v in merged.values() if v != null_manifest] + assert len(m) == 1 + m = m[0] + + # Finally, build a list of actions to retrieve based on position in + # source_list. + + retrievals = [list() for i in source_list] + + for a in m.gen_actions(): + if a.has_payload: + source = hash_source.pop(a.hash, None) + if source is not None: + retrievals[source].append(a) + return m, retrievals def __merge_fmris(new_fmri, manifest_list, fmri_list, variant_list, variant): - """Private merge implementation.""" - - # Remove variant tags, package variant metadata, and signatures - # from manifests since we're reassigning. This allows merging - # pre-tagged, already merged pkgs, or signed packages. 
- - blended_actions = [] - blend_names = set([variant, variant[8:]]) - - for j, m in enumerate(manifest_list): - deleted_count = 0 - vval = variant_list[j] - for i, a in enumerate(m.actions[:]): - if a.name == "signature" or \ - (a.name == "set" and a.attrs["name"] == "pkg.fmri"): - # signatures and pkg.fmri actions are no longer - # valid after merging - del m.actions[i - deleted_count] - deleted_count += 1 - continue - - if variant in a.attrs: - if a.attrs[variant] != vval: - # we have an already merged - # manifest; filter out actions - # for other variants - del m.actions[i - deleted_count] - deleted_count += 1 - continue - else: - del a.attrs[variant] - - if a.name == "set" and a.attrs["name"] == variant: - if vval not in a.attrlist("value"): - raise PkgmergeException( - _("package {pkg} is tagged as " - "not supporting {var_name} " - "{var_value}").format( - pkg=fmri_list[j], - var_name=variant, - var_value=vval)) - del m.actions[i - deleted_count] - deleted_count += 1 - # checking if we're supposed to blend this action - # for this variant. Handle prepended "variant.". - if blend_names & set(a.attrlist("pkg.merge.blend")): - blended_actions.append((j, a)) - - # add blended actions to other manifests - for j, m in enumerate(manifest_list): - for k, a in blended_actions: - if k != j: - m.actions.append(a) - - # Like the unix utility comm, except that this function - # takes an arbitrary number of manifests and compares them, - # returning a tuple consisting of each manifest's actions - # that are not the same for all manifests, followed by a - # list of actions that are the same in each manifest. - try: - action_lists = list(manifest.Manifest.comm(manifest_list)) - except manifest.ManifestDuplicateError as e: - raise PkgmergeException(e) - - # Declare new package FMRI. - action_lists[-1].insert(0, - actions.fromstr("set name=pkg.fmri value={0}".format(new_fmri))) - - for a_list, v in zip(action_lists[:-1], variant_list): - for a in a_list: - a.attrs[variant] = v - # discard any blend tags for this variant from common list - for a in action_lists[-1]: - blend_attrs = set(a.attrlist("pkg.merge.blend")) - match = blend_names & blend_attrs - for m in list(match): - if len(blend_attrs) == 1: - del a.attrs["pkg.merge.blend"] - else: - a.attrlist("pkg.merge.blend").remove(m) - # combine actions into single list - allactions = reduce(lambda a, b: a + b, action_lists) - - # figure out which variants are actually there for this pkg - actual_variant_list = [ - v - for m, v in zip(manifest_list, variant_list) - ] + """Private merge implementation.""" + + # Remove variant tags, package variant metadata, and signatures + # from manifests since we're reassigning. This allows merging + # pre-tagged, already merged pkgs, or signed packages. 
+ + blended_actions = [] + blend_names = set([variant, variant[8:]]) + + for j, m in enumerate(manifest_list): + deleted_count = 0 + vval = variant_list[j] + for i, a in enumerate(m.actions[:]): + if a.name == "signature" or ( + a.name == "set" and a.attrs["name"] == "pkg.fmri" + ): + # signatures and pkg.fmri actions are no longer + # valid after merging + del m.actions[i - deleted_count] + deleted_count += 1 + continue + + if variant in a.attrs: + if a.attrs[variant] != vval: + # we have an already merged + # manifest; filter out actions + # for other variants + del m.actions[i - deleted_count] + deleted_count += 1 + continue + else: + del a.attrs[variant] + + if a.name == "set" and a.attrs["name"] == variant: + if vval not in a.attrlist("value"): + raise PkgmergeException( + _( + "package {pkg} is tagged as " + "not supporting {var_name} " + "{var_value}" + ).format( + pkg=fmri_list[j], var_name=variant, var_value=vval + ) + ) + del m.actions[i - deleted_count] + deleted_count += 1 + # checking if we're supposed to blend this action + # for this variant. Handle prepended "variant.". + if blend_names & set(a.attrlist("pkg.merge.blend")): + blended_actions.append((j, a)) + + # add blended actions to other manifests + for j, m in enumerate(manifest_list): + for k, a in blended_actions: + if k != j: + m.actions.append(a) + + # Like the unix utility comm, except that this function + # takes an arbitrary number of manifests and compares them, + # returning a tuple consisting of each manifest's actions + # that are not the same for all manifests, followed by a + # list of actions that are the same in each manifest. + try: + action_lists = list(manifest.Manifest.comm(manifest_list)) + except manifest.ManifestDuplicateError as e: + raise PkgmergeException(e) + + # Declare new package FMRI. 
+ action_lists[-1].insert( + 0, actions.fromstr("set name=pkg.fmri value={0}".format(new_fmri)) + ) + + for a_list, v in zip(action_lists[:-1], variant_list): + for a in a_list: + a.attrs[variant] = v + # discard any blend tags for this variant from common list + for a in action_lists[-1]: + blend_attrs = set(a.attrlist("pkg.merge.blend")) + match = blend_names & blend_attrs + for m in list(match): + if len(blend_attrs) == 1: + del a.attrs["pkg.merge.blend"] + else: + a.attrlist("pkg.merge.blend").remove(m) + # combine actions into single list + allactions = reduce(lambda a, b: a + b, action_lists) + + # figure out which variants are actually there for this pkg + actual_variant_list = [v for m, v in zip(manifest_list, variant_list)] + + # add set action to document which variants are supported + allactions.append( + actions.fromstr( + "set name={0} {1}".format( + variant, + " ".join(["value={0}".format(a) for a in actual_variant_list]), + ) + ) + ) - # add set action to document which variants are supported - allactions.append(actions.fromstr("set name={0} {1}".format(variant, - " ".join([ - "value={0}".format(a) - for a in actual_variant_list - ]) - ))) + allactions.sort() - allactions.sort() + m = manifest.Manifest(pfmri=new_fmri) + m.set_content(content=allactions) + return m - m = manifest.Manifest(pfmri=new_fmri) - m.set_content(content=allactions) - return m def build_merge_list(include, exclude, cat): - """Given a list of patterns to include and a list of patterns - to exclude, return a dictionary of fmris to be included, - along w/ a list of include patterns that don't match""" + """Given a list of patterns to include and a list of patterns + to exclude, return a dictionary of fmris to be included, + along w/ a list of include patterns that don't match""" - include_dict, include_misses = match_user_fmris(include, cat) - exclude_dict, ignored = match_user_fmris(exclude, cat) + include_dict, include_misses = match_user_fmris(include, cat) + exclude_dict, ignored = match_user_fmris(exclude, cat) - for pkg_name in include_dict: - if pkg_name in exclude_dict: - include_dict[pkg_name] -= exclude_dict[pkg_name] + for pkg_name in include_dict: + if pkg_name in exclude_dict: + include_dict[pkg_name] -= exclude_dict[pkg_name] + + return ( + dict( + (k, sorted(list(v), reverse=True)[0]) + for k, v in six.iteritems(include_dict) + if v + ), + include_misses, + ) - return dict((k, sorted(list(v), reverse=True)[0]) - for k,v in six.iteritems(include_dict) - if v), include_misses def match_user_fmris(patterns, cat): - """Given a user-specified list of patterns, return a dictionary - of matching fmri sets: - - {pkgname: [fmri, ... ] - pkgname: [fmri, ... ] - ... - } - - Note that patterns starting w/ pkg:/ require an exact match; - patterns containing '*' will using fnmatch rules; the default - trailing match rules are used for remaining patterns. - """ - - matchers = [] - fmris = [] - versions = [] - - # ignore dups - patterns = list(set(patterns)) - - # figure out which kind of matching rules to employ - latest_pats = set() - for pat in patterns: - try: - parts = pat.split("@", 1) - pat_stem = parts[0] - pat_ver = None - if len(parts) > 1: - pat_ver = parts[1] - - if "*" in pat_stem or "?" 
in pat_stem: - matcher = pkg.fmri.glob_match - elif pat_stem.startswith("pkg:/") or \ - pat_stem.startswith("/"): - matcher = pkg.fmri.exact_name_match - else: - matcher = pkg.fmri.fmri_match - - if matcher == pkg.fmri.glob_match: - fmri = pkg.fmri.MatchingPkgFmri(pat_stem) - else: - fmri = pkg.fmri.PkgFmri(pat_stem) - - if not pat_ver: - # Do nothing. - pass - elif "*" in pat_ver or "?" in pat_ver or \ - pat_ver == "latest": - fmri.version = \ - pkg.version.MatchingVersion(pat_ver) - else: - fmri.version = \ - pkg.version.Version(pat_ver) - - if pat_ver and \ - getattr(fmri.version, "match_latest", None): - latest_pats.add(pat) - - matchers.append(matcher) - versions.append(fmri.version) - fmris.append(fmri) - except (pkg.fmri.FmriError, - pkg.version.VersionError) as e: - raise PkgmergeException(str(e)) - - # Create a dictionary of patterns, with each value being a - # dictionary of pkg names & fmris that match that pattern. - ret = dict(zip(patterns, [dict() for i in patterns])) - - for name in cat.keys(): - for pat, matcher, fmri, version in \ - zip(patterns, matchers, fmris, versions): - if not matcher(name, fmri.pkg_name): - continue # name doesn't match - for ver, pfmris in cat[name]: - if version and not ver.is_successor(version, - pkg.version.CONSTRAINT_AUTO): - continue # version doesn't match - for f in pfmris: - ret[pat].setdefault(f.pkg_name, - []).append(f) - - # Discard all but the newest version of each match. - if latest_pats: - # Rebuild ret based on latest version of every package. - latest = {} - nret = {} - for p in patterns: - if p not in latest_pats or not ret[p]: - nret[p] = ret[p] - continue - - nret[p] = {} - for pkg_name in ret[p]: - nret[p].setdefault(pkg_name, []) - for f in ret[p][pkg_name]: - nver = latest.get(f.pkg_name, None) - latest[f.pkg_name] = max(nver, - f.version) - if f.version == latest[f.pkg_name]: - # Allow for multiple FMRIs of - # the same latest version. - nret[p][pkg_name] = [ - e - for e in nret[p][pkg_name] - if e.version == f.version - ] - nret[p][pkg_name].append(f) - - # Assign new version of ret and discard latest list. - ret = nret - del latest - - # merge patterns together and create sets - merge_dict = {} - for d in ret.values(): - merge_dict.update(d) - - for k in merge_dict: - merge_dict[k] = set(merge_dict[k]) - - unmatched_patterns = [ - p - for p in ret - if not ret[p] - ] + """Given a user-specified list of patterns, return a dictionary + of matching fmri sets: - return merge_dict, unmatched_patterns + {pkgname: [fmri, ... ] + pkgname: [fmri, ... ] + ... + } + Note that patterns starting w/ pkg:/ require an exact match; + patterns containing '*' will using fnmatch rules; the default + trailing match rules are used for remaining patterns. + """ -if __name__ == "__main__": - misc.setlocale(locale.LC_ALL, "", error) - gettext.install("pkg", "/usr/share/locale") - misc.set_fd_limits(printer=error) - - # Make all warnings be errors. 
- import warnings - warnings.simplefilter('error') - if six.PY3: - # disable ResourceWarning: unclosed file - warnings.filterwarnings("ignore", category=ResourceWarning) + matchers = [] + fmris = [] + versions = [] + + # ignore dups + patterns = list(set(patterns)) + + # figure out which kind of matching rules to employ + latest_pats = set() + for pat in patterns: try: - __ret = main_func() - except (pkg.actions.ActionError, trans.TransactionError, - RuntimeError, pkg.fmri.FmriError, apx.ApiException) as __e: - print("pkgmerge: {0}".format(__e), file=sys.stderr) - __ret = EXIT_OOPS - except (PipeError, KeyboardInterrupt): - __ret = EXIT_OOPS - except SystemExit as __e: - raise __e - except EnvironmentError as __e: - error(str(apx._convert_error(__e))) - __ret = EXIT_OOPS - except Exception as __e: - traceback.print_exc() - error(misc.get_traceback_message(), exitcode=None) - __ret = 99 - finally: - cleanup() + parts = pat.split("@", 1) + pat_stem = parts[0] + pat_ver = None + if len(parts) > 1: + pat_ver = parts[1] + + if "*" in pat_stem or "?" in pat_stem: + matcher = pkg.fmri.glob_match + elif pat_stem.startswith("pkg:/") or pat_stem.startswith("/"): + matcher = pkg.fmri.exact_name_match + else: + matcher = pkg.fmri.fmri_match + + if matcher == pkg.fmri.glob_match: + fmri = pkg.fmri.MatchingPkgFmri(pat_stem) + else: + fmri = pkg.fmri.PkgFmri(pat_stem) + + if not pat_ver: + # Do nothing. + pass + elif "*" in pat_ver or "?" in pat_ver or pat_ver == "latest": + fmri.version = pkg.version.MatchingVersion(pat_ver) + else: + fmri.version = pkg.version.Version(pat_ver) + + if pat_ver and getattr(fmri.version, "match_latest", None): + latest_pats.add(pat) + + matchers.append(matcher) + versions.append(fmri.version) + fmris.append(fmri) + except (pkg.fmri.FmriError, pkg.version.VersionError) as e: + raise PkgmergeException(str(e)) + + # Create a dictionary of patterns, with each value being a + # dictionary of pkg names & fmris that match that pattern. + ret = dict(zip(patterns, [dict() for i in patterns])) + + for name in cat.keys(): + for pat, matcher, fmri, version in zip( + patterns, matchers, fmris, versions + ): + if not matcher(name, fmri.pkg_name): + continue # name doesn't match + for ver, pfmris in cat[name]: + if version and not ver.is_successor( + version, pkg.version.CONSTRAINT_AUTO + ): + continue # version doesn't match + for f in pfmris: + ret[pat].setdefault(f.pkg_name, []).append(f) + + # Discard all but the newest version of each match. + if latest_pats: + # Rebuild ret based on latest version of every package. + latest = {} + nret = {} + for p in patterns: + if p not in latest_pats or not ret[p]: + nret[p] = ret[p] + continue + + nret[p] = {} + for pkg_name in ret[p]: + nret[p].setdefault(pkg_name, []) + for f in ret[p][pkg_name]: + nver = latest.get(f.pkg_name, None) + latest[f.pkg_name] = max(nver, f.version) + if f.version == latest[f.pkg_name]: + # Allow for multiple FMRIs of + # the same latest version. + nret[p][pkg_name] = [ + e + for e in nret[p][pkg_name] + if e.version == f.version + ] + nret[p][pkg_name].append(f) + + # Assign new version of ret and discard latest list. 
+ ret = nret + del latest + + # merge patterns together and create sets + merge_dict = {} + for d in ret.values(): + merge_dict.update(d) - sys.exit(__ret) + for k in merge_dict: + merge_dict[k] = set(merge_dict[k]) + + unmatched_patterns = [p for p in ret if not ret[p]] + + return merge_dict, unmatched_patterns + + +if __name__ == "__main__": + misc.setlocale(locale.LC_ALL, "", error) + gettext.install("pkg", "/usr/share/locale") + misc.set_fd_limits(printer=error) + + # Make all warnings be errors. + import warnings + + warnings.simplefilter("error") + if six.PY3: + # disable ResourceWarning: unclosed file + warnings.filterwarnings("ignore", category=ResourceWarning) + try: + __ret = main_func() + except ( + pkg.actions.ActionError, + trans.TransactionError, + RuntimeError, + pkg.fmri.FmriError, + apx.ApiException, + ) as __e: + print("pkgmerge: {0}".format(__e), file=sys.stderr) + __ret = EXIT_OOPS + except (PipeError, KeyboardInterrupt): + __ret = EXIT_OOPS + except SystemExit as __e: + raise __e + except EnvironmentError as __e: + error(str(apx._convert_error(__e))) + __ret = EXIT_OOPS + except Exception as __e: + traceback.print_exc() + error(misc.get_traceback_message(), exitcode=None) + __ret = 99 + finally: + cleanup() + + sys.exit(__ret) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/publish/pkgmogrify.py b/src/util/publish/pkgmogrify.py index 3a724db4b..fa038723f 100755 --- a/src/util/publish/pkgmogrify.py +++ b/src/util/publish/pkgmogrify.py @@ -24,7 +24,9 @@ # from __future__ import print_function -import pkg.site_paths; pkg.site_paths.init() +import pkg.site_paths + +pkg.site_paths.init() import getopt import gettext import locale @@ -40,134 +42,149 @@ def usage(errmsg="", exitcode=EXIT_BADOPT): - """Emit a usage message and optionally prefix it with a more specific - error message. Causes program to exit.""" + """Emit a usage message and optionally prefix it with a more specific + error message. Causes program to exit.""" - if errmsg: - print("pkgmogrify: {0}".format(errmsg), file=sys.stderr) + if errmsg: + print("pkgmogrify: {0}".format(errmsg), file=sys.stderr) - print(_("""\ + print( + _( + """\ Usage: pkgmogrify [-vi] [-I includedir ...] [-D macro=value ...] 
- [-O outputfile] [-P printfile] [inputfile ...]""")) - sys.exit(exitcode) + [-O outputfile] [-P printfile] [inputfile ...]""" + ) + ) + sys.exit(exitcode) + def error(text, exitcode=EXIT_OOPS): - """Emit an error message prefixed by the command name """ + """Emit an error message prefixed by the command name""" + + print("pkgmogrify: {0}".format(text), file=sys.stderr) + if exitcode != None: + sys.exit(exitcode) - print("pkgmogrify: {0}".format(text), file=sys.stderr) - if exitcode != None: - sys.exit(exitcode) def main_func(): - outfilename = None - printfilename = None - verbose = False - ignoreincludes = False - includes = [] - macros = {} - printinfo = [] - output = [] - - try: - opts, pargs = getopt.getopt(sys.argv[1:], "ivD:I:O:P:?", ["help"]) - for opt, arg in opts: - if opt == "-D": - if "=" not in arg: - error(_("macros must be of form name=value")) - a = arg.split("=", 1) - if a[0] == "": - error(_("macros must be of form name=value")) - macros.update([("$({0})".format(a[0]), a[1])]) - if opt == "-i": - ignoreincludes = True - if opt == "-I": - includes.append(arg) - if opt == "-O": - outfilename = arg - if opt == "-P": - printfilename = arg - if opt == "-v": - verbose = True - if opt in ("--help", "-?"): - usage(exitcode=EXIT_OK) - - except getopt.GetoptError as e: - usage(_("illegal global option -- {0}").format(e.opt)) - - try: - mog.process_mog(pargs, ignoreincludes, verbose, includes, - macros, printinfo, output, error_cb=error) - except RuntimeError as e: - sys.exit(EXIT_OOPS) - - try: - if printfilename == None: - printfile = sys.stdout + outfilename = None + printfilename = None + verbose = False + ignoreincludes = False + includes = [] + macros = {} + printinfo = [] + output = [] + + try: + opts, pargs = getopt.getopt(sys.argv[1:], "ivD:I:O:P:?", ["help"]) + for opt, arg in opts: + if opt == "-D": + if "=" not in arg: + error(_("macros must be of form name=value")) + a = arg.split("=", 1) + if a[0] == "": + error(_("macros must be of form name=value")) + macros.update([("$({0})".format(a[0]), a[1])]) + if opt == "-i": + ignoreincludes = True + if opt == "-I": + includes.append(arg) + if opt == "-O": + outfilename = arg + if opt == "-P": + printfilename = arg + if opt == "-v": + verbose = True + if opt in ("--help", "-?"): + usage(exitcode=EXIT_OK) + + except getopt.GetoptError as e: + usage(_("illegal global option -- {0}").format(e.opt)) + + try: + mog.process_mog( + pargs, + ignoreincludes, + verbose, + includes, + macros, + printinfo, + output, + error_cb=error, + ) + except RuntimeError as e: + sys.exit(EXIT_OOPS) + + try: + if printfilename == None: + printfile = sys.stdout + else: + printfile = open(printfilename, "w") + + for p in printinfo: + print("{0}".format(p), file=printfile) + + except IOError as e: + error(_("Cannot write extra data {0}").format(e)) + + try: + if outfilename == None: + outfile = sys.stdout + else: + outfile = open(outfilename, "w") + + emitted = set() + for comment, actionlist, prepended_macro in output: + if comment: + for l in comment: + print("{0}".format(l), file=outfile) + + for i, action in enumerate(actionlist): + if action is None: + continue + if prepended_macro is None: + s = "{0}".format(action) else: - printfile = open(printfilename, "w") - - for p in printinfo: - print("{0}".format(p), file=printfile) + s = "{0}{1}".format(prepended_macro, action) + # The first action is the original action and + # should be printed; later actions are all + # emitted and should only be printed if not + # duplicates. 
+ if i == 0: + print(s, file=outfile) + elif s not in emitted: + print(s, file=outfile) + emitted.add(s) + except IOError as e: + error(_("Cannot write output {0}").format(e)) + + return 0 - except IOError as e: - error(_("Cannot write extra data {0}").format(e)) - - try: - if outfilename == None: - outfile = sys.stdout - else: - outfile = open(outfilename, "w") - - emitted = set() - for comment, actionlist, prepended_macro in output: - if comment: - for l in comment: - print("{0}".format(l), file=outfile) - - for i, action in enumerate(actionlist): - if action is None: - continue - if prepended_macro is None: - s = "{0}".format(action) - else: - s = "{0}{1}".format(prepended_macro, action) - # The first action is the original action and - # should be printed; later actions are all - # emitted and should only be printed if not - # duplicates. - if i == 0: - print(s, file=outfile) - elif s not in emitted: - print(s, file=outfile) - emitted.add(s) - except IOError as e: - error(_("Cannot write output {0}").format(e)) - - return 0 if __name__ == "__main__": - misc.setlocale(locale.LC_ALL, "", error) - gettext.install("pkg", "/usr/share/locale") - misc.set_fd_limits(printer=error) - - # Make all warnings be errors. - warnings.simplefilter('error') - if six.PY3: - # disable ResourceWarning: unclosed file - warnings.filterwarnings("ignore", category=ResourceWarning) - try: - exit_code = main_func() - except (PipeError, KeyboardInterrupt): - exit_code = EXIT_OOPS - except SystemExit as __e: - exit_code = __e - except Exception as __e: - traceback.print_exc() - error(misc.get_traceback_message(), exitcode=None) - exit_code = 99 - - sys.exit(exit_code) + misc.setlocale(locale.LC_ALL, "", error) + gettext.install("pkg", "/usr/share/locale") + misc.set_fd_limits(printer=error) + + # Make all warnings be errors. + warnings.simplefilter("error") + if six.PY3: + # disable ResourceWarning: unclosed file + warnings.filterwarnings("ignore", category=ResourceWarning) + try: + exit_code = main_func() + except (PipeError, KeyboardInterrupt): + exit_code = EXIT_OOPS + except SystemExit as __e: + exit_code = __e + except Exception as __e: + traceback.print_exc() + error(misc.get_traceback_message(), exitcode=None) + exit_code = 99 + + sys.exit(exit_code) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/publish/pkgsurf.py b/src/util/publish/pkgsurf.py index 032f4028b..1d78ea110 100644 --- a/src/util/publish/pkgsurf.py +++ b/src/util/publish/pkgsurf.py @@ -66,7 +66,9 @@ # change. It runs a catalog rebuild as the last step to regain catalog integrity # within the repo. -import pkg.site_paths; pkg.site_paths.init() +import pkg.site_paths + +pkg.site_paths.init() import getopt import gettext import locale @@ -102,53 +104,61 @@ repo_finished = False repo_uri = None + def error(text, cmd=None): - """Emit an error message prefixed by the command name """ + """Emit an error message prefixed by the command name""" - if cmd: - text = "\n{0}: {1}".format(cmd, text) + if cmd: + text = "\n{0}: {1}".format(cmd, text) - else: - text = "\n{0}: {1}".format(PKG_CLIENT_NAME, text) + else: + text = "\n{0}: {1}".format(PKG_CLIENT_NAME, text) + # If the message starts with whitespace, assume that it should come + # *before* the command-name prefix. + text_nows = text.lstrip() + ws = text[: len(text) - len(text_nows)] - # If the message starts with whitespace, assume that it should come - # *before* the command-name prefix. 
- text_nows = text.lstrip() - ws = text[:len(text) - len(text_nows)] + # This has to be a constant value as we can't reliably get our actual + # program name on all platforms. + emsg(ws + text_nows) - # This has to be a constant value as we can't reliably get our actual - # program name on all platforms. - emsg(ws + text_nows) def cleanup(no_msg=False): - """Remove temporary directories. Print error msg in case operation - was not finished.""" + """Remove temporary directories. Print error msg in case operation + was not finished.""" - global temp_root + global temp_root - if repo_modified and not repo_finished and not no_msg: - error(_(""" + if repo_modified and not repo_finished and not no_msg: + error( + _( + """ The target repository has been modified but the operation did not finish successfully. It is now in an inconsistent state. To re-try the operation, run the following commands: /usr/bin/pkgrepo rebuild -s {repo} {argv} -""").format(repo=repo_uri, argv=" ".join(sys.argv))) +""" + ).format(repo=repo_uri, argv=" ".join(sys.argv)) + ) + + if temp_root: + shutil.rmtree(temp_root) + temp_root = None - if temp_root: - shutil.rmtree(temp_root) - temp_root = None def usage(usage_error=None, cmd=None, retcode=pkgdefs.EXIT_BADOPT): - """Emit a usage message and optionally prefix it with a more specific - error message. Causes program to exit.""" + """Emit a usage message and optionally prefix it with a more specific + error message. Causes program to exit.""" - if usage_error: - error(usage_error, cmd=cmd) + if usage_error: + error(usage_error, cmd=cmd) - emsg(_("""\ + emsg( + _( + """\ Usage: pkgsurf -s target_path -r ref_uri [-n] [-p publisher ...] [-i name ...] [-c pattern ...] @@ -174,717 +184,805 @@ def usage(usage_error=None, cmd=None, retcode=pkgdefs.EXIT_BADOPT): filesystem-based repository. -?/--help Print this message. -""")) +""" + ) + ) + sys.exit(retcode) - sys.exit(retcode) def abort(err=None, retcode=pkgdefs.EXIT_OOPS): - """To be called when a fatal error is encountered.""" + """To be called when a fatal error is encountered.""" - if err: - # Clear any possible output first. - msg("") - error(err) + if err: + # Clear any possible output first. + msg("") + error(err) + + cleanup() + sys.exit(retcode) - cleanup() - sys.exit(retcode) def fetch_catalog(src_pub, xport, temp_root): - """Fetch the catalog from src_uri.""" + """Fetch the catalog from src_uri.""" - if not src_pub.meta_root: - # Create a temporary directory for catalog. - cat_dir = tempfile.mkdtemp(dir=temp_root) - src_pub.meta_root = cat_dir + if not src_pub.meta_root: + # Create a temporary directory for catalog. + cat_dir = tempfile.mkdtemp(dir=temp_root) + src_pub.meta_root = cat_dir - src_pub.transport = xport - src_pub.refresh(full_refresh=True, immediate=True) + src_pub.transport = xport + src_pub.refresh(full_refresh=True, immediate=True) + + return src_pub.catalog - return src_pub.catalog def get_latest(cat): - """ Get latest packages (surface) from given catalog. - Returns a dict of the form: - { pkg-name: pkg-fmri, ... } - """ - matching, ref, unmatched = cat.get_matching_fmris(["*@latest"]) + """Get latest packages (surface) from given catalog. + Returns a dict of the form: + { pkg-name: pkg-fmri, ... 
} + """ + matching, ref, unmatched = cat.get_matching_fmris(["*@latest"]) + + del ref - del ref + matches = {} + for m in matching: + matches[m] = matching[m][0] - matches = {} - for m in matching: - matches[m] = matching[m][0] + return matches - return matches def get_matching_pkgs(cat, patterns): - """Get the matching pkg FMRIs from catalog 'cat' based on the input - patterns 'patterns'.""" + """Get the matching pkg FMRIs from catalog 'cat' based on the input + patterns 'patterns'.""" + + versions = set() + for p in patterns: + if "@" in p: + versions.add(p) - versions = set() - for p in patterns: - if "@" in p: - versions.add(p) + if versions: + msg = _( + "Packages specified to not be reversioned cannot " + "contain versions:\n\t" + ) + msg += "\n\t".join(versions) + abort(msg) - if versions: - msg = _("Packages specified to not be reversioned cannot " - "contain versions:\n\t") - msg += "\n\t".join(versions) - abort(msg) + matching, ref, unmatched = cat.get_matching_fmris(patterns) - matching, ref, unmatched = cat.get_matching_fmris(patterns) + if unmatched: + msg = _( + "The specified packages were not found in the " "repository:\n\t" + ) + msg += "\n\t".join(unmatched) + abort(msg) - if unmatched: - msg = _("The specified packages were not found in the " - "repository:\n\t") - msg += "\n\t".join(unmatched) - abort(msg) + return list(matching.keys()) - return list(matching.keys()) def get_manifest(repo, pub, pfmri): - """ Retrieve a manifest with FMRI 'pfmri' of publisher 'pub' from - repository object 'repo'. """ + """Retrieve a manifest with FMRI 'pfmri' of publisher 'pub' from + repository object 'repo'.""" + + path = repo.manifest(pfmri, pub) + mani = manifest.Manifest(pfmri) + try: + mani.set_content(pathname=path) + except Exception as e: + abort( + err=_( + "Can not open manifest file {file}: {err}\n" + "Please run 'pkgrepo verify -s {rroot}' to check the " + "integrity of the repository." + ).format(file=path, err=str(e), rroot=repo.root) + ) + return mani - path = repo.manifest(pfmri, pub) - mani = manifest.Manifest(pfmri) - try: - mani.set_content(pathname=path) - except Exception as e: - abort(err=_("Can not open manifest file {file}: {err}\n" - "Please run 'pkgrepo verify -s {rroot}' to check the " - "integrity of the repository.").format( - file=path, err=str(e), rroot=repo.root)) - return mani def get_tracker(): - try: - progresstracker = \ - progress.FancyUNIXProgressTracker() - except progress.ProgressTrackerException: - progresstracker = progress.CommandLineProgressTracker() - progresstracker.set_major_phase(progresstracker.PHASE_UTILITY) - return progresstracker + try: + progresstracker = progress.FancyUNIXProgressTracker() + except progress.ProgressTrackerException: + progresstracker = progress.CommandLineProgressTracker() + progresstracker.set_major_phase(progresstracker.PHASE_UTILITY) + return progresstracker -def subs_undef_fmri_str(fmri_str, latest_ref_pkgs): - """ Substitute correct dependency FMRI if no counterpart can be found in - the reference manifest. Use the original FMRI in case the current - version of dependency pkg in the repo is still a successor of the - specified dependency FMRI, otherwise substitute the complete version of - the pkg currently present in the repo.""" - dpfmri = fmri.PkgFmri(fmri_str) - ndpfmri = latest_ref_pkgs[dpfmri.get_name()] +def subs_undef_fmri_str(fmri_str, latest_ref_pkgs): + """Substitute correct dependency FMRI if no counterpart can be found in + the reference manifest. 
Use the original FMRI in case the current + version of dependency pkg in the repo is still a successor of the + specified dependency FMRI, otherwise substitute the complete version of + the pkg currently present in the repo.""" + + dpfmri = fmri.PkgFmri(fmri_str) + ndpfmri = latest_ref_pkgs[dpfmri.get_name()] + + if ndpfmri.is_successor(dpfmri): + return fmri_str + + return ndpfmri.get_short_fmri(anarchy=True) + + +def get_dep_fmri_str( + fmri_str, pkg, act, latest_ref_pkgs, reversioned_pkgs, ref_xport +): + """Get the adjusted dependency FMRI of package 'pkg' specified in + action 'act' based on if the FMRI belongs to a reversioned package or + not. 'fmri_str' contains the original FMRI string from the manifest to + be adjusted. This has to be passed in separately since in case of + require-any or group-any dependencies, an action can contain multiple + FMRIs.""" + + dpfmri = fmri.PkgFmri(fmri_str) + + # Versionless dependencies don't need to be changed. + if not dpfmri.version: + return fmri_str + + # Dep package hasn't been changed, no adjustment necessary. + if dpfmri.get_pkg_stem() not in reversioned_pkgs: + return fmri_str + + # Find the dependency action of the reference package + # and replace the current version with it. + try: + ref_mani = ref_xport.get_manifest(latest_ref_pkgs[pkg]) + except KeyError: + # This package is not in the ref repo so we just substitute the + # dependency. + return subs_undef_fmri_str(fmri_str, latest_ref_pkgs) - if ndpfmri.is_successor(dpfmri): - return fmri_str + for ra in ref_mani.gen_actions_by_type("depend"): + # Any difference other than the FMRI means we + # can't use this action as a reference. + diffs = act.differences(ra) + if "fmri" in diffs: + diffs.remove("fmri") + if diffs: + continue - return ndpfmri.get_short_fmri(anarchy=True) + fmris = ra.attrlist("fmri") -def get_dep_fmri_str(fmri_str, pkg, act, latest_ref_pkgs, reversioned_pkgs, - ref_xport): - """Get the adjusted dependency FMRI of package 'pkg' specified in - action 'act' based on if the FMRI belongs to a reversioned package or - not. 'fmri_str' contains the original FMRI string from the manifest to - be adjusted. This has to be passed in separately since in case of - require-any or group-any dependencies, an action can contain multiple - FMRIs. """ + for rf in fmris: + rpfmri = fmri.PkgFmri(rf) + if rpfmri.get_pkg_stem() != dpfmri.get_pkg_stem(): + continue - dpfmri = fmri.PkgFmri(fmri_str) + # Only substitute dependency if it actually + # changed. + if ( + not rpfmri.version + or rpfmri.get_version() != dpfmri.get_version() + ): + return rf - # Versionless dependencies don't need to be changed. - if not dpfmri.version: - return fmri_str + return fmri_str - # Dep package hasn't been changed, no adjustment necessary. - if dpfmri.get_pkg_stem() not in reversioned_pkgs: - return fmri_str + # If a varcet changed we might not find the matching action. + return subs_undef_fmri_str(fmri_str, latest_ref_pkgs) - # Find the dependency action of the reference package - # and replace the current version with it. - try: - ref_mani = ref_xport.get_manifest(latest_ref_pkgs[pkg]) - except KeyError: - # This package is not in the ref repo so we just substitute the - # dependency. - return subs_undef_fmri_str(fmri_str, latest_ref_pkgs) - - for ra in ref_mani.gen_actions_by_type("depend"): - # Any difference other than the FMRI means we - # can't use this action as a reference. 
- diffs = act.differences(ra) - if "fmri" in diffs: - diffs.remove("fmri") - if diffs: - continue - - fmris = ra.attrlist("fmri") - - for rf in fmris: - rpfmri = fmri.PkgFmri(rf) - if rpfmri.get_pkg_stem() != dpfmri.get_pkg_stem(): - continue - - # Only substitute dependency if it actually - # changed. - if not rpfmri.version \ - or rpfmri.get_version() != dpfmri.get_version(): - return rf - - return fmri_str - - # If a varcet changed we might not find the matching action. - return subs_undef_fmri_str(fmri_str, latest_ref_pkgs) def adjust_dep_action(pkg, act, latest_ref_pkgs, reversioned_pkgs, ref_xport): - """Adjust dependency FMRIs of action 'act' if it is of type depend. - The adjusted action will reference only FMRIs which are present in the - reversioned repo. """ + """Adjust dependency FMRIs of action 'act' if it is of type depend. + The adjusted action will reference only FMRIs which are present in the + reversioned repo.""" - modified = False + modified = False - # Drop signatures (changed dependency will void signature value). - if act.name == "signature": - return - # Ignore anything other than depend actions. - elif act.name != "depend": - return act + # Drop signatures (changed dependency will void signature value). + if act.name == "signature": + return + # Ignore anything other than depend actions. + elif act.name != "depend": + return act - # require-any and group-any deps are a list so convert every dep FMRI - # into a list. - fmris = act.attrlist("fmri") + # require-any and group-any deps are a list so convert every dep FMRI + # into a list. + fmris = act.attrlist("fmri") - new_dep = [] - for f in fmris: - new_f = get_dep_fmri_str(f, pkg, act, latest_ref_pkgs, - reversioned_pkgs, ref_xport) - if not modified and f != new_f: - modified = True - new_dep.append(new_f) + new_dep = [] + for f in fmris: + new_f = get_dep_fmri_str( + f, pkg, act, latest_ref_pkgs, reversioned_pkgs, ref_xport + ) + if not modified and f != new_f: + modified = True + new_dep.append(new_f) - if not modified: - return act + if not modified: + return act - if len(new_dep) == 1: - new_dep = new_dep[0] + if len(new_dep) == 1: + new_dep = new_dep[0] - nact = actions.fromstr(str(act)) - nact.attrs["fmri"] = new_dep + nact = actions.fromstr(str(act)) + nact.attrs["fmri"] = new_dep + + return nact - return nact def use_ref(a, deps, ignores): - """Determine if the given action indicates that the pkg can be - reversioned.""" - - if a.name == "set" and "name" in a.attrs: - if a.attrs["name"] in ignores: - return True - # We ignore the pkg FMRI because this is what - # will always change. - if a.attrs["name"] == "pkg.fmri": - return True - - # Signature will always change. - if a.name == "signature": - return True - - if a.name == "depend": - # TODO: support dependency lists - # For now, treat as content change. - if not isinstance(a.attrs["fmri"], six.string_types): - return False - dpfmri = fmri.PkgFmri(a.attrs["fmri"]) - deps.add(dpfmri.get_pkg_stem()) - return True + """Determine if the given action indicates that the pkg can be + reversioned.""" + + if a.name == "set" and "name" in a.attrs: + if a.attrs["name"] in ignores: + return True + # We ignore the pkg FMRI because this is what + # will always change. + if a.attrs["name"] == "pkg.fmri": + return True + + # Signature will always change. + if a.name == "signature": + return True - return False + if a.name == "depend": + # TODO: support dependency lists + # For now, treat as content change. 
+ if not isinstance(a.attrs["fmri"], six.string_types): + return False + dpfmri = fmri.PkgFmri(a.attrs["fmri"]) + deps.add(dpfmri.get_pkg_stem()) + return True -def do_reversion(pub, ref_pub, target_repo, ref_xport, changes, ignores, - cmp_policy, ref_repo, ref, ref_xport_cfg): - """Do the repo reversion. - Return 'True' if repo got modified, 'False' otherwise.""" - - global temp_root, tracker, dry_run, repo_finished, repo_modified - - target_cat = target_repo.get_catalog(pub=pub) - ref_cat = fetch_catalog(ref_pub, ref_xport, temp_root) - - latest_pkgs = get_latest(target_cat) - latest_ref_pkgs = get_latest(ref_cat) - - no_revs = get_matching_pkgs(target_cat, changes) - - # We use bulk prefetching for faster transport of the manifests. - # Prefetch requires an intent which it sends to the server. Here - # we just use operation=reversion for all FMRIs. - intent = "operation=reversion;" - # use list() to force the zip() to evaluate - ref_pkgs = list(zip(latest_ref_pkgs.values(), repeat(intent))) - - # Retrieve reference manifests. - # Try prefetching manifests in bulk first for faster, parallel - # transport. Retryable errors during prefetch are ignored and - # manifests are retrieved again during the "Reading" phase. - ref_xport.prefetch_manifests(ref_pkgs, progtrack=tracker) - - # Need to change the output of mfst_fetch since otherwise we - # would see "Download Manifests x/y" twice, once from the - # prefetch and once from the actual manifest analysis. - tracker.mfst_fetch = progress.GoalTrackerItem(_("Analyzing Manifests")) - - tracker.manifest_fetch_start(len(latest_pkgs)) - - reversioned_pkgs = set() - depend_changes = {} - dups = 0 # target pkg has equal version to ref pkg - new_p = 0 # target pkg not in ref - sucs = 0 # ref pkg is successor to pkg in targ - nrevs = 0 # pkgs requested to not be reversioned by user - manifest_errors = set() - - for p in latest_pkgs: - # First check if the package is in the list of FMRIs the user - # doesn't want to reversion. - if p in no_revs: - nrevs += 1 - tracker.manifest_fetch_progress(completion=True) - continue - - # Check if the package is in the ref repo, if not: ignore. - if p not in latest_ref_pkgs: - new_p += 1 - tracker.manifest_fetch_progress(completion=True) - continue - - pfmri = latest_pkgs[p] - # Ignore if latest package is the same in targ and ref. - if pfmri == latest_ref_pkgs[p]: - dups += 1 - tracker.manifest_fetch_progress(completion=True) - continue - - # Ignore packages where ref version is higher. - if latest_ref_pkgs[p].is_successor(pfmri): - sucs += 1 - tracker.manifest_fetch_progress(completion=True) - continue - - # Pull the manifests for target and ref repo. - dm = get_manifest(target_repo, pub, pfmri) - rm = ref_xport.get_manifest(latest_ref_pkgs[p]) - tracker.manifest_fetch_progress(completion=True) - - tdeps = set() - rdeps = set() - - # Diff target and ref manifest. - # action only in targ, action only in ref, common action - try: - ta, ra, ca = manifest.Manifest.comm([dm, rm], - cmp_policy=cmp_policy) - except manifest.ManifestDuplicateError as e: - manifest_errors.add(e) - continue - - # Check for manifest changes. - if not all(use_ref(a, tdeps, ignores) for a in ta) \ - or not all(use_ref(a, rdeps, ignores) for a in ra): - continue - - # Both dep lists should be equally long in case deps have just - # changed. If not, it means a dep has been added or removed and - # that means content change. 
- if len(tdeps) != len(rdeps): - continue - - # If len is not different we still have to make sure that - # entries have the same pkg stem. The test above just saves time - # in some cases. - if not all(td in rdeps for td in tdeps): - continue - - # Pkg only contains dependency change. Keep for further - # analysis. - if tdeps: - depend_changes[pfmri.get_pkg_stem( - anarchy=True)] = tdeps - continue - - # Pkg passed all checks and can be reversioned. - reversioned_pkgs.add(pfmri.get_pkg_stem(anarchy=True)) - - tracker.manifest_fetch_done() - - # No point continuing if errors in the manifests have - # been detected. - if manifest_errors: - errmsg = ''.join(str(i) for i in manifest_errors) - abort(err=_("Manifest error(s) found:\n{}").format(errmsg)) - - def has_changed(pstem, seen=None, depth=0): - """Determine if a package or any of its dependencies has - changed. - Function will check if a dependency had a content change. If it - only had a dependency change, analyze its dependencies - recursively. Only if the whole dependency chain didn't have any - content change it is safe to reversion the package. - - Note about circular dependencies: The function keeps track of - pkgs it already processed by stuffing them into the set 'seen'. - However, 'seen' gets updated before the child dependencies of - the current pkg are examined. This works if 'seen' is only used - for one dependency chain since the function immediately comes - back with a True result if a pkg has changed further down the - tree. However, if 'seen' is re-used between runs, it will - return prematurely, likely returning wrong results. """ - - MAX_DEPTH = 100 - - if not seen: - seen = set() - - if pstem in seen: - return False - - depth += 1 - if depth > MAX_DEPTH: - # Let's make sure we don't run into any - # recursion limits. If the dep chain is too deep - # just treat as changed pkg. - error(_("Dependency chain depth of >{md:d} detected for" - " {p}.").format(md=MAX_DEPTH, p=p)) - return True - - # Pkg has no change at all. - if pstem in reversioned_pkgs: - return False - - # Pkg must have content change, if it had no change it would be - # in reversioned_pkgs, and if it had just a dep change it would - # be in depend_changes. - if pstem not in depend_changes: - return True - - # We need to update 'seen' here, otherwise we won't find this - # entry in case of a circular dependency. - seen.add(pstem) - - return any( - has_changed(d, seen, depth) - for d in depend_changes[pstem] + return False + + +def do_reversion( + pub, + ref_pub, + target_repo, + ref_xport, + changes, + ignores, + cmp_policy, + ref_repo, + ref, + ref_xport_cfg, +): + """Do the repo reversion. + Return 'True' if repo got modified, 'False' otherwise.""" + + global temp_root, tracker, dry_run, repo_finished, repo_modified + + target_cat = target_repo.get_catalog(pub=pub) + ref_cat = fetch_catalog(ref_pub, ref_xport, temp_root) + + latest_pkgs = get_latest(target_cat) + latest_ref_pkgs = get_latest(ref_cat) + + no_revs = get_matching_pkgs(target_cat, changes) + + # We use bulk prefetching for faster transport of the manifests. + # Prefetch requires an intent which it sends to the server. Here + # we just use operation=reversion for all FMRIs. + intent = "operation=reversion;" + # use list() to force the zip() to evaluate + ref_pkgs = list(zip(latest_ref_pkgs.values(), repeat(intent))) + + # Retrieve reference manifests. + # Try prefetching manifests in bulk first for faster, parallel + # transport. 
Retryable errors during prefetch are ignored and + # manifests are retrieved again during the "Reading" phase. + ref_xport.prefetch_manifests(ref_pkgs, progtrack=tracker) + + # Need to change the output of mfst_fetch since otherwise we + # would see "Download Manifests x/y" twice, once from the + # prefetch and once from the actual manifest analysis. + tracker.mfst_fetch = progress.GoalTrackerItem(_("Analyzing Manifests")) + + tracker.manifest_fetch_start(len(latest_pkgs)) + + reversioned_pkgs = set() + depend_changes = {} + dups = 0 # target pkg has equal version to ref pkg + new_p = 0 # target pkg not in ref + sucs = 0 # ref pkg is successor to pkg in targ + nrevs = 0 # pkgs requested to not be reversioned by user + manifest_errors = set() + + for p in latest_pkgs: + # First check if the package is in the list of FMRIs the user + # doesn't want to reversion. + if p in no_revs: + nrevs += 1 + tracker.manifest_fetch_progress(completion=True) + continue + + # Check if the package is in the ref repo, if not: ignore. + if p not in latest_ref_pkgs: + new_p += 1 + tracker.manifest_fetch_progress(completion=True) + continue + + pfmri = latest_pkgs[p] + # Ignore if latest package is the same in targ and ref. + if pfmri == latest_ref_pkgs[p]: + dups += 1 + tracker.manifest_fetch_progress(completion=True) + continue + + # Ignore packages where ref version is higher. + if latest_ref_pkgs[p].is_successor(pfmri): + sucs += 1 + tracker.manifest_fetch_progress(completion=True) + continue + + # Pull the manifests for target and ref repo. + dm = get_manifest(target_repo, pub, pfmri) + rm = ref_xport.get_manifest(latest_ref_pkgs[p]) + tracker.manifest_fetch_progress(completion=True) + + tdeps = set() + rdeps = set() + + # Diff target and ref manifest. + # action only in targ, action only in ref, common action + try: + ta, ra, ca = manifest.Manifest.comm([dm, rm], cmp_policy=cmp_policy) + except manifest.ManifestDuplicateError as e: + manifest_errors.add(e) + continue + + # Check for manifest changes. + if not all(use_ref(a, tdeps, ignores) for a in ta) or not all( + use_ref(a, rdeps, ignores) for a in ra + ): + continue + + # Both dep lists should be equally long in case deps have just + # changed. If not, it means a dep has been added or removed and + # that means content change. + if len(tdeps) != len(rdeps): + continue + + # If len is not different we still have to make sure that + # entries have the same pkg stem. The test above just saves time + # in some cases. + if not all(td in rdeps for td in tdeps): + continue + + # Pkg only contains dependency change. Keep for further + # analysis. + if tdeps: + depend_changes[pfmri.get_pkg_stem(anarchy=True)] = tdeps + continue + + # Pkg passed all checks and can be reversioned. + reversioned_pkgs.add(pfmri.get_pkg_stem(anarchy=True)) + + tracker.manifest_fetch_done() + + # No point continuing if errors in the manifests have + # been detected. + if manifest_errors: + errmsg = "".join(str(i) for i in manifest_errors) + abort(err=_("Manifest error(s) found:\n{}").format(errmsg)) + + def has_changed(pstem, seen=None, depth=0): + """Determine if a package or any of its dependencies has + changed. + Function will check if a dependency had a content change. If it + only had a dependency change, analyze its dependencies + recursively. Only if the whole dependency chain didn't have any + content change it is safe to reversion the package. + + Note about circular dependencies: The function keeps track of + pkgs it already processed by stuffing them into the set 'seen'. 
+ However, 'seen' gets updated before the child dependencies of + the current pkg are examined. This works if 'seen' is only used + for one dependency chain since the function immediately comes + back with a True result if a pkg has changed further down the + tree. However, if 'seen' is re-used between runs, it will + return prematurely, likely returning wrong results.""" + + MAX_DEPTH = 100 + + if not seen: + seen = set() + + if pstem in seen: + return False + + depth += 1 + if depth > MAX_DEPTH: + # Let's make sure we don't run into any + # recursion limits. If the dep chain is too deep + # just treat as changed pkg. + error( + _( + "Dependency chain depth of >{md:d} detected for" " {p}." + ).format(md=MAX_DEPTH, p=p) + ) + return True + + # Pkg has no change at all. + if pstem in reversioned_pkgs: + return False + + # Pkg must have content change, if it had no change it would be + # in reversioned_pkgs, and if it had just a dep change it would + # be in depend_changes. + if pstem not in depend_changes: + return True + + # We need to update 'seen' here, otherwise we won't find this + # entry in case of a circular dependency. + seen.add(pstem) + + return any(has_changed(d, seen, depth) for d in depend_changes[pstem]) + + # Check if packages which just have a dep change can be reversioned by + # checking if child dependencies also have no content change. + dep_revs = 0 + for p in depend_changes: + if not has_changed(p): + dep_revs += 1 + reversioned_pkgs.add(p) + + status = [] + if cmp_policy == CMP_UNSIGNED: + status.append( + ( + _( + "WARNING: Signature changes in file content " + "ignored in resurfacing" ) + ) + ) + status.append((_("Packages to process:"), str(len(latest_pkgs)))) + status.append((_("New packages:"), str(new_p))) + status.append((_("Unmodified packages:"), str(dups))) + if sucs: + # This only happens if reference repo is ahead of target repo, + # so only show if it actually happened. + status.append( + (_("Packages with successors in " "reference repo:"), str(sucs)) + ) + if nrevs: + # This only happens if user specified pkgs to not revert, + # so only show if it actually happened. + status.append( + ( + _("Packages not to be reversioned by user " "request:"), + str(nrevs), + ) + ) + status.append( + ( + _("Packages with no content change:"), + str(len(reversioned_pkgs) - dep_revs), + ) + ) + status.append( + ( + _("Packages which only have dependency change:"), + str(len(depend_changes)), + ) + ) + status.append( + (_("Packages with unchanged dependency chain:"), str(dep_revs)) + ) + status.append( + (_("Packages to be reversioned:"), str(len(reversioned_pkgs))) + ) + + rjust_status = max(len(s[0]) for s in status) + rjust_value = max(len(s[1]) for s in status) + for s in status: + msg("{0} {1}".format(s[0].rjust(rjust_status), s[1].rjust(rjust_value))) + + if not reversioned_pkgs: + msg(_("\nNo packages to reversion.")) + return False - # Check if packages which just have a dep change can be reversioned by - # checking if child dependencies also have no content change. 
- dep_revs = 0 - for p in depend_changes: - if not has_changed(p): - dep_revs += 1 - reversioned_pkgs.add(p) - - status = [] - if cmp_policy == CMP_UNSIGNED: - status.append((_("WARNING: Signature changes in file content " - "ignored in resurfacing"))) - status.append((_("Packages to process:"), str(len(latest_pkgs)))) - status.append((_("New packages:"), str(new_p))) - status.append((_("Unmodified packages:"), str(dups))) - if sucs: - # This only happens if reference repo is ahead of target repo, - # so only show if it actually happened. - status.append((_("Packages with successors in " - "reference repo:"), str(sucs))) - if nrevs: - # This only happens if user specified pkgs to not revert, - # so only show if it actually happened. - status.append((_("Packages not to be reversioned by user " - "request:"), str(nrevs))) - status.append((_("Packages with no content change:"), - str(len(reversioned_pkgs) - dep_revs))) - status.append((_("Packages which only have dependency change:"), - str(len(depend_changes)))) - status.append((_("Packages with unchanged dependency chain:"), - str(dep_revs))) - status.append((_("Packages to be reversioned:"), - str(len(reversioned_pkgs)))) - - rjust_status = max(len(s[0]) for s in status) - rjust_value = max(len(s[1]) for s in status) - for s in status: - msg("{0} {1}".format(s[0].rjust(rjust_status), - s[1].rjust(rjust_value))) - - if not reversioned_pkgs: - msg(_("\nNo packages to reversion.")) - return False - - if dry_run: - msg(_("\nReversioning packages (dry-run).")) - else: - msg(_("\nReversioning packages.")) - - # Start the main pass. Reversion packages from reversioned_pkgs to the - # version in the ref repo. For packages which don't get reversioned, - # check if the dependency versions are still correct, fix if necessary. - tracker.reversion_start(len(latest_pkgs), len(reversioned_pkgs)) - - for p in latest_pkgs: - tracker.reversion_add_progress(pfmri, pkgs=1) - modified = False - - # Get the pkg fmri (pfmri) of the latest version based on if it - # has been reversioned or not. - stem = latest_pkgs[p].get_pkg_stem(anarchy=True) - if stem in reversioned_pkgs: - tracker.reversion_add_progress(pfmri, reversioned=1) - if dry_run: - continue - pfmri = latest_ref_pkgs[p] - # Retrieve manifest from ref repo and replace the one in - # the target repo. We don't have to adjust depndencies - # for these packages because they will not depend on - # anything we'll reversion. - rmani = ref_xport.get_manifest(pfmri) - - if cmp_policy == CMP_UNSIGNED: - # Files with different signed content hash - # values can have equivalent unsigned content - # hash. CMP_UNSIGNED relaxes comparison - # constraints and allows this case to compare - # as equal. The reversioned manifest may - # reference file data that is not present in - # the target repository, so ensure that any - # missing file data is added to the target - # repository. - add_missing_files(target_repo, pub, - latest_pkgs[p], pfmri, rmani, ref, ref_repo, - ref_xport, ref_xport_cfg, ref_pub) - - opath = target_repo.manifest(latest_pkgs[p], pub) - os.remove(opath) - path = target_repo.manifest(pfmri, pub) - try: - repo_modified = True - repo_finished = False - portable.rename(rmani.pathname, path) - except OSError as e: - abort(err=_("Could not reversion manifest " - "{path}: {err}").format(path=path, - err=str(e))) - continue - - # For packages we don't reversion we have to check if they - # depend on a reversioned package. 
- # Since the version of this dependency might be removed from the - # repo, we have to adjust the dep version to the one of the - # reversioned pkg. - pfmri = latest_pkgs[p] - omani = get_manifest(target_repo, pub, pfmri) - mani = manifest.Manifest(pfmri) - for act in omani.gen_actions(): - nact = adjust_dep_action(p, act, latest_ref_pkgs, - reversioned_pkgs, ref_xport) - if nact: - mani.add_action(nact, misc.EmptyI) - if nact is not act: - modified = True - - # Only touch manifest if something actually changed. - if modified: - tracker.reversion_add_progress(pfmri, adjusted=1) - if not dry_run: - path = target_repo.manifest(pfmri, pub) - repo_modified = True - repo_finished = False - mani.store(path) - tracker.reversion_done() + if dry_run: + msg(_("\nReversioning packages (dry-run).")) + else: + msg(_("\nReversioning packages.")) - return True + # Start the main pass. Reversion packages from reversioned_pkgs to the + # version in the ref repo. For packages which don't get reversioned, + # check if the dependency versions are still correct, fix if necessary. + tracker.reversion_start(len(latest_pkgs), len(reversioned_pkgs)) -def add_missing_files(target_repo, pub, latest_pkg, pfmri, rmani, ref, ref_repo, - ref_xport, ref_xport_cfg, ref_pub): - """Add missing data from reference repository to target repository.""" - - tmani = get_manifest(target_repo, pub, latest_pkg) - trstore = target_repo.get_pub_rstore(pub) - - thashes = frozenset( - ta.hash for ta in tmani.gen_actions() if ta.has_payload) - rhashes = frozenset( - ra.hash for ra in rmani.gen_actions() if ra.has_payload) - possible = rhashes - thashes - - if ref.scheme == "file": - for h in possible: - try: - target_repo.file(h) - continue - except (sr.RepositoryUnsupportedOperationError, - sr.RepositoryFileNotFoundError): - pass - - try: - trstore.copy_file(h, ref_repo.file(h)) - except (EnvironmentError, - sr.RepositoryFileNotFoundError) as e: - abort(err=_("Could not reversion file " - "{path}: {err}").format(path=h, err=str(e))) - return - - pkgdir = ref_xport_cfg.get_pkg_dir(pfmri) - mfile = ref_xport.multi_file_ni(ref_pub, pkgdir) - - downloaded = set() - for a in rmani.gen_actions(): - if (a.has_payload and a.hash in possible and - a.hash not in downloaded): - try: - target_repo.file(a.hash) - except (sr.RepositoryUnsupportedOperationError, - sr.RepositoryFileNotFoundError): - downloaded.add(a.hash) - mfile.add_action(a) - mfile.wait_files() - - for h in downloaded: - src_path = os.path.join(pkgdir, h) - try: - trstore.insert_file(h, src_path) - except (EnvironmentError, - sr.RepositoryFileNotFoundError) as e: - abort(err=_("Could not reversion file " - "{path}: {err}").format(path=h, err=str(e))) + for p in latest_pkgs: + tracker.reversion_add_progress(pfmri, pkgs=1) + modified = False -def main_func(): + # Get the pkg fmri (pfmri) of the latest version based on if it + # has been reversioned or not. + stem = latest_pkgs[p].get_pkg_stem(anarchy=True) + if stem in reversioned_pkgs: + tracker.reversion_add_progress(pfmri, reversioned=1) + if dry_run: + continue + pfmri = latest_ref_pkgs[p] + # Retrieve manifest from ref repo and replace the one in + # the target repo. We don't have to adjust depndencies + # for these packages because they will not depend on + # anything we'll reversion. + rmani = ref_xport.get_manifest(pfmri) + + if cmp_policy == CMP_UNSIGNED: + # Files with different signed content hash + # values can have equivalent unsigned content + # hash. 
CMP_UNSIGNED relaxes comparison + # constraints and allows this case to compare + # as equal. The reversioned manifest may + # reference file data that is not present in + # the target repository, so ensure that any + # missing file data is added to the target + # repository. + add_missing_files( + target_repo, + pub, + latest_pkgs[p], + pfmri, + rmani, + ref, + ref_repo, + ref_xport, + ref_xport_cfg, + ref_pub, + ) - global temp_root, repo_modified, repo_finished, repo_uri, tracker - global dry_run + opath = target_repo.manifest(latest_pkgs[p], pub) + os.remove(opath) + path = target_repo.manifest(pfmri, pub) + try: + repo_modified = True + repo_finished = False + portable.rename(rmani.pathname, path) + except OSError as e: + abort( + err=_( + "Could not reversion manifest " "{path}: {err}" + ).format(path=path, err=str(e)) + ) + continue + + # For packages we don't reversion we have to check if they + # depend on a reversioned package. + # Since the version of this dependency might be removed from the + # repo, we have to adjust the dep version to the one of the + # reversioned pkg. + pfmri = latest_pkgs[p] + omani = get_manifest(target_repo, pub, pfmri) + mani = manifest.Manifest(pfmri) + for act in omani.gen_actions(): + nact = adjust_dep_action( + p, act, latest_ref_pkgs, reversioned_pkgs, ref_xport + ) + if nact: + mani.add_action(nact, misc.EmptyI) + if nact is not act: + modified = True + + # Only touch manifest if something actually changed. + if modified: + tracker.reversion_add_progress(pfmri, adjusted=1) + if not dry_run: + path = target_repo.manifest(pfmri, pub) + repo_modified = True + repo_finished = False + mani.store(path) + tracker.reversion_done() + + return True + + +def add_missing_files( + target_repo, + pub, + latest_pkg, + pfmri, + rmani, + ref, + ref_repo, + ref_xport, + ref_xport_cfg, + ref_pub, +): + """Add missing data from reference repository to target repository.""" + + tmani = get_manifest(target_repo, pub, latest_pkg) + trstore = target_repo.get_pub_rstore(pub) + + thashes = frozenset(ta.hash for ta in tmani.gen_actions() if ta.has_payload) + rhashes = frozenset(ra.hash for ra in rmani.gen_actions() if ra.has_payload) + possible = rhashes - thashes + + if ref.scheme == "file": + for h in possible: + try: + target_repo.file(h) + continue + except ( + sr.RepositoryUnsupportedOperationError, + sr.RepositoryFileNotFoundError, + ): + pass + + try: + trstore.copy_file(h, ref_repo.file(h)) + except (EnvironmentError, sr.RepositoryFileNotFoundError) as e: + abort( + err=_("Could not reversion file " "{path}: {err}").format( + path=h, err=str(e) + ) + ) + return + + pkgdir = ref_xport_cfg.get_pkg_dir(pfmri) + mfile = ref_xport.multi_file_ni(ref_pub, pkgdir) + + downloaded = set() + for a in rmani.gen_actions(): + if a.has_payload and a.hash in possible and a.hash not in downloaded: + try: + target_repo.file(a.hash) + except ( + sr.RepositoryUnsupportedOperationError, + sr.RepositoryFileNotFoundError, + ): + downloaded.add(a.hash) + mfile.add_action(a) + mfile.wait_files() + + for h in downloaded: + src_path = os.path.join(pkgdir, h) + try: + trstore.insert_file(h, src_path) + except (EnvironmentError, sr.RepositoryFileNotFoundError) as e: + abort( + err=_("Could not reversion file " "{path}: {err}").format( + path=h, err=str(e) + ) + ) - global_settings.client_name = PKG_CLIENT_NAME +def main_func(): + global temp_root, repo_modified, repo_finished, repo_uri, tracker + global dry_run + + global_settings.client_name = PKG_CLIENT_NAME + + try: + opts, pargs = 
getopt.getopt(sys.argv[1:], "?c:i:np:r:s:u", ["help"]) + except getopt.GetoptError as e: + usage(_("illegal option -- {0}").format(e.opt)) + + dry_run = False + ref_repo_uri = None + repo_uri = os.getenv("PKG_REPO", None) + changes = set() + ignores = set() + publishers = set() + cmp_policy = CMP_ALL + + processed_pubs = 0 + + for opt, arg in opts: + if opt == "-c": + changes.add(arg) + elif opt == "-i": + ignores.add(arg) + elif opt == "-n": + dry_run = True + elif opt == "-p": + publishers.add(arg) + elif opt == "-r": + ref_repo_uri = misc.parse_uri(arg) + elif opt == "-s": + repo_uri = misc.parse_uri(arg) + elif opt == "-u": + cmp_policy = CMP_UNSIGNED + elif opt == "-?" or opt == "--help": + usage(retcode=pkgdefs.EXIT_OK) + + if pargs: + usage(_("Unexpected argument(s): {0}").format(" ".join(pargs))) + + if not repo_uri: + usage(_("A target repository must be provided.")) + + if not ref_repo_uri: + usage(_("A reference repository must be provided.")) + + target = publisher.RepositoryURI(misc.parse_uri(repo_uri)) + if target.scheme != "file": + abort(err=_("Target repository must be filesystem-based.")) + try: + target_repo = sr.Repository( + read_only=dry_run, root=target.get_pathname() + ) + except sr.RepositoryError as e: + abort(str(e)) + + # Use the tmp directory in target repo for efficient file rename since + # files are in the same file system. + temp_root = target_repo.temp_root + if not os.path.exists(temp_root): + os.makedirs(temp_root) + + ref_incoming_dir = tempfile.mkdtemp(dir=temp_root) + ref_pkg_root = tempfile.mkdtemp(dir=temp_root) + + ref_xport, ref_xport_cfg = transport.setup_transport() + ref_xport_cfg.incoming_root = ref_incoming_dir + ref_xport_cfg.pkg_root = ref_pkg_root + transport.setup_publisher( + ref_repo_uri, "ref", ref_xport, ref_xport_cfg, remote_prefix=True + ) + + ref_repo = None + ref = publisher.RepositoryURI(misc.parse_uri(ref_repo_uri)) + if ref.scheme == "file": try: - opts, pargs = getopt.getopt(sys.argv[1:], "?c:i:np:r:s:u", - ["help"]) - except getopt.GetoptError as e: - usage(_("illegal option -- {0}").format(e.opt)) - - dry_run = False - ref_repo_uri = None - repo_uri = os.getenv("PKG_REPO", None) - changes = set() - ignores = set() - publishers = set() - cmp_policy = CMP_ALL - - processed_pubs = 0 - - for opt, arg in opts: - if opt == "-c": - changes.add(arg) - elif opt == "-i": - ignores.add(arg) - elif opt == "-n": - dry_run = True - elif opt == "-p": - publishers.add(arg) - elif opt == "-r": - ref_repo_uri = misc.parse_uri(arg) - elif opt == "-s": - repo_uri = misc.parse_uri(arg) - elif opt == "-u": - cmp_policy = CMP_UNSIGNED - elif opt == "-?" or opt == "--help": - usage(retcode=pkgdefs.EXIT_OK) - - if pargs: - usage(_("Unexpected argument(s): {0}").format(" ".join(pargs))) - - if not repo_uri: - usage(_("A target repository must be provided.")) - - if not ref_repo_uri: - usage(_("A reference repository must be provided.")) - - target = publisher.RepositoryURI(misc.parse_uri(repo_uri)) - if target.scheme != "file": - abort(err=_("Target repository must be filesystem-based.")) - try: - target_repo = sr.Repository(read_only=dry_run, - root=target.get_pathname()) + # It is possible that the client does not + # have write access to the reference repo + # so open it read-only to prevent the + # attempt to create a lock file in it. 
+ ref_repo = sr.Repository(read_only=True, root=ref.get_pathname()) except sr.RepositoryError as e: - abort(str(e)) - - # Use the tmp directory in target repo for efficient file rename since - # files are in the same file system. - temp_root = target_repo.temp_root - if not os.path.exists(temp_root): - os.makedirs(temp_root) - - ref_incoming_dir = tempfile.mkdtemp(dir=temp_root) - ref_pkg_root = tempfile.mkdtemp(dir=temp_root) - - ref_xport, ref_xport_cfg = transport.setup_transport() - ref_xport_cfg.incoming_root = ref_incoming_dir - ref_xport_cfg.pkg_root = ref_pkg_root - transport.setup_publisher(ref_repo_uri, "ref", ref_xport, - ref_xport_cfg, remote_prefix=True) - - ref_repo = None - ref = publisher.RepositoryURI(misc.parse_uri(ref_repo_uri)) - if ref.scheme == "file": - try: - # It is possible that the client does not - # have write access to the reference repo - # so open it read-only to prevent the - # attempt to create a lock file in it. - ref_repo = sr.Repository(read_only=True, - root=ref.get_pathname()) - except sr.RepositoryError as e: - abort(str(e)) - - tracker = get_tracker() - - for pub in target_repo.publishers: - if publishers and pub not in publishers \ - and '*' not in publishers: - continue - - msg(_("Processing packages for publisher {0} ...").format(pub)) - # Find the matching pub in the ref repo. - for ref_pub in ref_xport_cfg.gen_publishers(): - if ref_pub.prefix == pub: - found = True - break - else: - txt = _("Publisher {0} not found in reference " - "repository.").format(pub) - if publishers: - abort(err=txt) - else: - txt += _(" Skipping.") - msg(txt) - continue - - processed_pubs += 1 - - rev = do_reversion(pub, ref_pub, target_repo, ref_xport, - changes, ignores, cmp_policy, ref_repo, ref, ref_xport_cfg) - - # If anything was reversioned rebuild the catalog and index - # to reflect those changes. - if rev and not dry_run: - msg(_("Rebuilding repository catalog.")) - target_repo.rebuild(pub=pub, build_index=True) - repo_finished = True - - ret = pkgdefs.EXIT_OK - if processed_pubs == 0: - msg(_("No matching publishers could be found.")) - ret = pkgdefs.EXIT_OOPS - cleanup() - return ret + abort(str(e)) + + tracker = get_tracker() + + for pub in target_repo.publishers: + if publishers and pub not in publishers and "*" not in publishers: + continue + + msg(_("Processing packages for publisher {0} ...").format(pub)) + # Find the matching pub in the ref repo. + for ref_pub in ref_xport_cfg.gen_publishers(): + if ref_pub.prefix == pub: + found = True + break + else: + txt = _( + "Publisher {0} not found in reference " "repository." + ).format(pub) + if publishers: + abort(err=txt) + else: + txt += _(" Skipping.") + msg(txt) + continue + + processed_pubs += 1 + + rev = do_reversion( + pub, + ref_pub, + target_repo, + ref_xport, + changes, + ignores, + cmp_policy, + ref_repo, + ref, + ref_xport_cfg, + ) + + # If anything was reversioned rebuild the catalog and index + # to reflect those changes. + if rev and not dry_run: + msg(_("Rebuilding repository catalog.")) + target_repo.rebuild(pub=pub, build_index=True) + repo_finished = True + + ret = pkgdefs.EXIT_OK + if processed_pubs == 0: + msg(_("No matching publishers could be found.")) + ret = pkgdefs.EXIT_OOPS + cleanup() + return ret # @@ -892,40 +990,39 @@ def main_func(): # so that we can more easily detect these in testing of the CLI commands. 
# if __name__ == "__main__": - misc.setlocale(locale.LC_ALL, "", error) - gettext.install("pkg", "/usr/share/locale") - misc.set_fd_limits(printer=error) - - if six.PY3: - # disable ResourceWarning: unclosed file - warnings.filterwarnings("ignore", category=ResourceWarning) - try: - __ret = main_func() - except PipeError: - # We don't want to display any messages here to prevent - # possible further broken pipe (EPIPE) errors. - cleanup(no_msg =True) - __ret = pkgdefs.EXIT_OOPS - except (KeyboardInterrupt, api_errors.CanceledException): - cleanup() - __ret = pkgdefs.EXIT_OOPS - except (actions.ActionError, RuntimeError, - api_errors.ApiException) as _e: - error(_e) - cleanup() - __ret = pkgdefs.EXIT_OOPS - except EnvironmentError as _e: - error(api_errors._convert_error(_e)) - cleanup() - __ret = pkgdefs.EXIT_OOPS - except SystemExit as _e: - cleanup() - raise _e - except: - traceback.print_exc() - error(misc.get_traceback_message()) - __ret = 99 - sys.exit(__ret) + misc.setlocale(locale.LC_ALL, "", error) + gettext.install("pkg", "/usr/share/locale") + misc.set_fd_limits(printer=error) + + if six.PY3: + # disable ResourceWarning: unclosed file + warnings.filterwarnings("ignore", category=ResourceWarning) + try: + __ret = main_func() + except PipeError: + # We don't want to display any messages here to prevent + # possible further broken pipe (EPIPE) errors. + cleanup(no_msg=True) + __ret = pkgdefs.EXIT_OOPS + except (KeyboardInterrupt, api_errors.CanceledException): + cleanup() + __ret = pkgdefs.EXIT_OOPS + except (actions.ActionError, RuntimeError, api_errors.ApiException) as _e: + error(_e) + cleanup() + __ret = pkgdefs.EXIT_OOPS + except EnvironmentError as _e: + error(api_errors._convert_error(_e)) + cleanup() + __ret = pkgdefs.EXIT_OOPS + except SystemExit as _e: + cleanup() + raise _e + except: + traceback.print_exc() + error(misc.get_traceback_message()) + __ret = 99 + sys.exit(__ret) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/publish/update_file_layout.py b/src/util/publish/update_file_layout.py index 2a7c40008..2398a5dcf 100755 --- a/src/util/publish/update_file_layout.py +++ b/src/util/publish/update_file_layout.py @@ -42,87 +42,92 @@ logger = global_settings.logger + def error(text, cmd=None): - """Emit an error message prefixed by the command name """ + """Emit an error message prefixed by the command name""" + + if cmd: + text = "{0}: {1}".format(cmd, text) + pkg_cmd = "pkg.migrate " + else: + pkg_cmd = "pkg.migrate: " - if cmd: - text = "{0}: {1}".format(cmd, text) - pkg_cmd = "pkg.migrate " - else: - pkg_cmd = "pkg.migrate: " + # If we get passed something like an Exception, we can convert + # it down to a string. + text = str(text) - # If we get passed something like an Exception, we can convert - # it down to a string. - text = str(text) + # If the message starts with whitespace, assume that it should come + # *before* the command-name prefix. + text_nows = text.lstrip() + ws = text[: len(text) - len(text_nows)] - # If the message starts with whitespace, assume that it should come - # *before* the command-name prefix. - text_nows = text.lstrip() - ws = text[:len(text) - len(text_nows)] + # This has to be a constant value as we can't reliably get our actual + # program name on all platforms. + logger.error(ws + pkg_cmd + text_nows) - # This has to be a constant value as we can't reliably get our actual - # program name on all platforms. 
- logger.error(ws + pkg_cmd + text_nows) def main_func(): - if len(sys.argv) != 2: - emsg(_("pkg.migrate takes a single directory as a parameter.")) - return 2 - - dir_loc = os.path.abspath(sys.argv[1]) - - if not os.path.isdir(dir_loc): - emsg(_("The argument must be a directory to migrate from older " - "layouts to the current\npreferred layout.")) - return 2 - - fm = file_manager.FileManager(root=dir_loc, readonly=False) - try: - for f in fm.walk(): - # A non-readonly FileManager will move a file under a - # non-preferred layout to the preferred layout during a - # lookup. - fm.lookup(f) - except file_manager.UnrecognizedFilePaths as e: - emsg(e) - return 1 - return 0 + if len(sys.argv) != 2: + emsg(_("pkg.migrate takes a single directory as a parameter.")) + return 2 + + dir_loc = os.path.abspath(sys.argv[1]) + + if not os.path.isdir(dir_loc): + emsg( + _( + "The argument must be a directory to migrate from older " + "layouts to the current\npreferred layout." + ) + ) + return 2 + + fm = file_manager.FileManager(root=dir_loc, readonly=False) + try: + for f in fm.walk(): + # A non-readonly FileManager will move a file under a + # non-preferred layout to the preferred layout during a + # lookup. + fm.lookup(f) + except file_manager.UnrecognizedFilePaths as e: + emsg(e) + return 1 + return 0 if __name__ == "__main__": - setlocale(locale.LC_ALL, "") - gettext.install("pkg", "/usr/share/locale") + setlocale(locale.LC_ALL, "") + gettext.install("pkg", "/usr/share/locale") - traceback_str = misc.get_traceback_message() + traceback_str = misc.get_traceback_message() + try: + # Out of memory errors can be raised as EnvironmentErrors with + # an errno of ENOMEM, so in order to handle those exceptions + # with other errnos, we nest this try block and have the outer + # one handle the other instances. try: - # Out of memory errors can be raised as EnvironmentErrors with - # an errno of ENOMEM, so in order to handle those exceptions - # with other errnos, we nest this try block and have the outer - # one handle the other instances. - try: - __ret = main_func() - except (MemoryError, EnvironmentError) as __e: - if isinstance(__e, EnvironmentError) and \ - __e.errno != errno.ENOMEM: - raise - if __img: - __img.history.abort(RESULT_FAILED_OUTOFMEMORY) - error("\n" + misc.out_of_memory()) - __ret = 1 - except SystemExit as __e: + __ret = main_func() + except (MemoryError, EnvironmentError) as __e: + if isinstance(__e, EnvironmentError) and __e.errno != errno.ENOMEM: raise - except (PipeError, KeyboardInterrupt): - if __img: - __img.history.abort(RESULT_CANCELED) - # We don't want to display any messages here to prevent - # possible further broken pipe (EPIPE) errors. - __ret = 1 - except: - traceback.print_exc() - error(traceback_str) - __ret = 99 - sys.exit(__ret) + if __img: + __img.history.abort(RESULT_FAILED_OUTOFMEMORY) + error("\n" + misc.out_of_memory()) + __ret = 1 + except SystemExit as __e: + raise + except (PipeError, KeyboardInterrupt): + if __img: + __img.history.abort(RESULT_CANCELED) + # We don't want to display any messages here to prevent + # possible further broken pipe (EPIPE) errors. 
+ __ret = 1 + except: + traceback.print_exc() + error(traceback_str) + __ret = 99 + sys.exit(__ret) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/qual-simulator/depot.py b/src/util/qual-simulator/depot.py index 60ab24c2b..73c6b7f10 100644 --- a/src/util/qual-simulator/depot.py +++ b/src/util/qual-simulator/depot.py @@ -25,6 +25,7 @@ # from __future__ import division + # pylint is picky about this message: # old-division; pylint: disable=W1619 import math @@ -84,135 +85,149 @@ class RepositoryURI(object): - def __init__(self, label, speed, cspeed, error_rate=ERROR_FREE, - error_type=ERROR_T_NET, modality=MODAL_SINGLE): - """Create a RepositoryURI object. The 'speed' argument - gives the speed in kB/s. The 'cspeed' argument gives the - connect time in milliseconds. The 'error_rate' variable - defines how often errors occur. The error_type is - defined by 'error_type'. The 'modality' argument - defines the different speed distributions.""" - - # Production members - self.uri = "http://" + label - self.priority = 1 - - # Simulator members - self.label = label - self.speed = speed * 1024 - self.cspeed = cspeed / 1000.0 - self.maxtx = 10000.0 - self.warmtx = 1000.0 - self.minspeed = .1 - self.__tx = 0 - self.__error_rate = error_rate / 1000.0 - self.__error_type = error_type - self.__response_modality = modality - self.__decay = 0.9 - self.__proxy = None - self.__system = None - self.aggregate_decay = 1 - - self.stats = stats.RepoStats(self) - - def __get_proxy(self): - return self.__proxy - - def __set_proxy(self, val): - self.__proxy = val - - def __get_system(self): - return self.__system - - def __set_system(self, val): - self.__system = val - - proxy = property(__get_proxy, __set_proxy, None, "Proxy of the " - "repository URI.") - - system = property(__get_system, __set_system, None, "System publisher " - "of the repository URI.") - - def key(self): - """Returns a value that can be used to identify this RepoURI - uniquely for the transport system. Normally, this would be done - using __hash__() however, TransportRepoURI objects are not - guaranteed to be immutable. - - The key is a (uri, proxy) tuple, where the proxy is - the proxy used to reach that URI. Note that in the transport - system, we may choose to override the proxy value here.""" - - u = self.uri - p = self.__proxy - - if self.uri: - u = self.uri.rstrip("/") - return (u, p) - - def speed_single(self, size): - """Implements a depot that runs at a single speed.""" - - return size / random.gauss(self.speed, self.speed / 4) - - def speed_increasing(self, size): - """Depot's speed gradually increases over time.""" - - s = min(self.minspeed + (self.__tx / self.warmtx), 1) * \ - random.gauss(self.speed, self.speed / 4) - - return size / s - - def speed_decay(self, size): - """Depot gets slower as time goes on.""" - - if random.uniform(0., 1.) < 0.05: - self.aggregate_decay *= self.__decay - - return size / (self.aggregate_decay * - random.gauss(self.speed, self.speed / 4)) - - def request(self, rc, size=None): - """Simulate a transport request using RepoChooser 'rc'. 
- Size is given in the 'size' argument.""" - - errors = 0 - - if not size: - size = random.randint(1, 1000) * 1024 - - if not rc[self.key()].used: - rc[self.key()].record_connection(self.cspeed) - else: - conn_choose = (1 / self.maxtx) * \ - math.exp(-1 / self.maxtx) - if random.random() < conn_choose: - rc[self.key()].record_connection(self.cspeed) + def __init__( + self, + label, + speed, + cspeed, + error_rate=ERROR_FREE, + error_type=ERROR_T_NET, + modality=MODAL_SINGLE, + ): + """Create a RepositoryURI object. The 'speed' argument + gives the speed in kB/s. The 'cspeed' argument gives the + connect time in milliseconds. The 'error_rate' variable + defines how often errors occur. The error_type is + defined by 'error_type'. The 'modality' argument + defines the different speed distributions.""" + + # Production members + self.uri = "http://" + label + self.priority = 1 + + # Simulator members + self.label = label + self.speed = speed * 1024 + self.cspeed = cspeed / 1000.0 + self.maxtx = 10000.0 + self.warmtx = 1000.0 + self.minspeed = 0.1 + self.__tx = 0 + self.__error_rate = error_rate / 1000.0 + self.__error_type = error_type + self.__response_modality = modality + self.__decay = 0.9 + self.__proxy = None + self.__system = None + self.aggregate_decay = 1 + + self.stats = stats.RepoStats(self) + + def __get_proxy(self): + return self.__proxy + + def __set_proxy(self, val): + self.__proxy = val + + def __get_system(self): + return self.__system + + def __set_system(self, val): + self.__system = val + + proxy = property( + __get_proxy, __set_proxy, None, "Proxy of the " "repository URI." + ) + + system = property( + __get_system, + __set_system, + None, + "System publisher " "of the repository URI.", + ) + + def key(self): + """Returns a value that can be used to identify this RepoURI + uniquely for the transport system. Normally, this would be done + using __hash__() however, TransportRepoURI objects are not + guaranteed to be immutable. + + The key is a (uri, proxy) tuple, where the proxy is + the proxy used to reach that URI. Note that in the transport + system, we may choose to override the proxy value here.""" + + u = self.uri + p = self.__proxy + + if self.uri: + u = self.uri.rstrip("/") + return (u, p) + + def speed_single(self, size): + """Implements a depot that runs at a single speed.""" + + return size / random.gauss(self.speed, self.speed / 4) + + def speed_increasing(self, size): + """Depot's speed gradually increases over time.""" + + s = min(self.minspeed + (self.__tx / self.warmtx), 1) * random.gauss( + self.speed, self.speed / 4 + ) + + return size / s + + def speed_decay(self, size): + """Depot gets slower as time goes on.""" + + if random.uniform(0.0, 1.0) < 0.05: + self.aggregate_decay *= self.__decay + + return size / ( + self.aggregate_decay * random.gauss(self.speed, self.speed / 4) + ) + + def request(self, rc, size=None): + """Simulate a transport request using RepoChooser 'rc'. 
+ Size is given in the 'size' argument.""" + + errors = 0 + + if not size: + size = random.randint(1, 1000) * 1024 + + if not rc[self.key()].used: + rc[self.key()].record_connection(self.cspeed) + else: + conn_choose = (1 / self.maxtx) * math.exp(-1 / self.maxtx) + if random.random() < conn_choose: + rc[self.key()].record_connection(self.cspeed) - rc[self.key()].record_tx() - self.__tx += 1 + rc[self.key()].record_tx() + self.__tx += 1 + + if random.random() < self.__error_rate: + if self.__error_type == ERROR_T_DECAYABLE: + rc[self.key()].record_error(decayable=True) + elif self.__error_type == ERROR_T_CONTENT: + rc[self.key()].record_error(content=True) + else: + rc[self.key()].record_error() + return (1, size, None) - if random.random() < self.__error_rate: - if self.__error_type == ERROR_T_DECAYABLE: - rc[self.key()].record_error(decayable=True) - elif self.__error_type == ERROR_T_CONTENT: - rc[self.key()].record_error(content=True) - else: - rc[self.key()].record_error() - return (1, size, None) + if self.__response_modality == MODAL_SINGLE: + time = self.speed_single(size) + elif self.__response_modality == MODAL_DECAY: + time = self.speed_decay(size) + elif self.__response_modality == MODAL_INCREASING: + time = self.speed_increasing(size) + else: + raise RuntimeError("no modality") - if self.__response_modality == MODAL_SINGLE: - time = self.speed_single(size) - elif self.__response_modality == MODAL_DECAY: - time = self.speed_decay(size) - elif self.__response_modality == MODAL_INCREASING: - time = self.speed_increasing(size) - else: - raise RuntimeError("no modality") + rc[self.key()].record_progress(size, time) - rc[self.key()].record_progress(size, time) + return (errors, size, time) - return (errors, size, time) # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker diff --git a/src/util/qual-simulator/scenario.py b/src/util/qual-simulator/scenario.py index adf2a65db..279b8a07f 100644 --- a/src/util/qual-simulator/scenario.py +++ b/src/util/qual-simulator/scenario.py @@ -33,74 +33,95 @@ import stats import pkg.misc as misc -class Scenario(object): - """A Scenario has a list of depot.RepositoryURIs.""" - - def __init__(self, title): - self.title = title - self.repo_uris = [] - self.origin_uris = [] - - def get_repo_uris(self): - return self.repo_uris - - def add_repo_uri(self, label, speed, cspeed, - error_rate=depot.ERROR_FREE, error_type=depot.ERROR_T_NET, - speed_distribution=depot.MODAL_SINGLE): - - r = depot.RepositoryURI(label, speed, cspeed, - error_rate=error_rate, error_type=error_type, - modality=speed_distribution) - - self.repo_uris.append(r) - - def add_origin_uri(self, label, speed, cspeed, - error_rate=depot.ERROR_FREE, error_type=depot.ERROR_T_NET, - speed_distribution=depot.MODAL_SINGLE): - - r = depot.RepositoryURI(label, speed, cspeed, - error_rate=error_rate, error_type=error_type, - modality=speed_distribution) - - self.origin_uris.append(r) - - def get_megabytes(self): - return self.__total_mb - def set_megabytes(self, mb): - self.__total_mb = mb - - def run(self): - print("SCENARIO: {0}".format(self.title)) - - total = self.__total_mb * 1024 * 1024 - - rc = stats.RepoChooser() - urilist = self.repo_uris[:] - urilist.extend(self.origin_uris) - - while total > 0: - s = rc.get_repostats(urilist, self.origin_uris) - - n = len(s) - m = rc.get_num_visited(urilist) - - if m < n: - c = 10 - else: - c = 100 - - print("bytes left {0:d}; retrieving {1:d} files".format( - total, c)) - rc.dump() - - # get 100 files - r = s[0] - for n in range(c): - 
req = r[1].request(rc) - total -= req[1] +class Scenario(object): + """A Scenario has a list of depot.RepositoryURIs.""" + + def __init__(self, title): + self.title = title + self.repo_uris = [] + self.origin_uris = [] + + def get_repo_uris(self): + return self.repo_uris + + def add_repo_uri( + self, + label, + speed, + cspeed, + error_rate=depot.ERROR_FREE, + error_type=depot.ERROR_T_NET, + speed_distribution=depot.MODAL_SINGLE, + ): + r = depot.RepositoryURI( + label, + speed, + cspeed, + error_rate=error_rate, + error_type=error_type, + modality=speed_distribution, + ) + + self.repo_uris.append(r) + + def add_origin_uri( + self, + label, + speed, + cspeed, + error_rate=depot.ERROR_FREE, + error_type=depot.ERROR_T_NET, + speed_distribution=depot.MODAL_SINGLE, + ): + r = depot.RepositoryURI( + label, + speed, + cspeed, + error_rate=error_rate, + error_type=error_type, + modality=speed_distribution, + ) + + self.origin_uris.append(r) + + def get_megabytes(self): + return self.__total_mb + + def set_megabytes(self, mb): + self.__total_mb = mb + + def run(self): + print("SCENARIO: {0}".format(self.title)) + + total = self.__total_mb * 1024 * 1024 + + rc = stats.RepoChooser() + urilist = self.repo_uris[:] + urilist.extend(self.origin_uris) + + while total > 0: + s = rc.get_repostats(urilist, self.origin_uris) + + n = len(s) + m = rc.get_num_visited(urilist) + + if m < n: + c = 10 + else: + c = 100 + + print("bytes left {0:d}; retrieving {1:d} files".format(total, c)) + rc.dump() + + # get 100 files + r = s[0] + for n in range(c): + req = r[1].request(rc) + total -= req[1] + + rc.dump() - rc.dump() misc.setlocale(locale.LC_ALL) gettext.install("pkg", "/usr/share/locale") @@ -144,8 +165,12 @@ def run(self): one_mirror = Scenario("origin and a faster, but decaying, mirror") one_mirror.add_origin_uri("origin", depot.SPEED_FAST, depot.CSPEED_NEARBY) -one_mirror.add_repo_uri("mirror", depot.SPEED_VERY_FAST, depot.CSPEED_LAN, - speed_distribution=depot.MODAL_DECAY) +one_mirror.add_repo_uri( + "mirror", + depot.SPEED_VERY_FAST, + depot.CSPEED_LAN, + speed_distribution=depot.MODAL_DECAY, +) one_mirror.set_megabytes(total_mb) @@ -156,8 +181,12 @@ def run(self): one_mirror = Scenario("origin and a slower, but increasing, mirror") one_mirror.add_origin_uri("origin", depot.SPEED_MEDIUM, depot.CSPEED_MEDIUM) -one_mirror.add_repo_uri("mirror", depot.SPEED_FAST, depot.CSPEED_LAN, - speed_distribution=depot.MODAL_INCREASING) +one_mirror.add_repo_uri( + "mirror", + depot.SPEED_FAST, + depot.CSPEED_LAN, + speed_distribution=depot.MODAL_INCREASING, +) one_mirror.set_megabytes(total_mb) @@ -166,11 +195,18 @@ def run(self): # Scenario 2e. An origin and mirror, mirror encountering decyable transport # errors. -one_mirror = Scenario("origin and a faster mirror. Mirror gets decayable errors") +one_mirror = Scenario( + "origin and a faster mirror. 
Mirror gets decayable errors" +) one_mirror.add_origin_uri("origin", depot.SPEED_FAST, depot.CSPEED_LAN) -one_mirror.add_repo_uri("mirror", depot.SPEED_SLIGHTLY_FASTER, depot.CSPEED_LAN, - error_rate=depot.ERROR_LOW, error_type=depot.ERROR_T_DECAYABLE) +one_mirror.add_repo_uri( + "mirror", + depot.SPEED_SLIGHTLY_FASTER, + depot.CSPEED_LAN, + error_rate=depot.ERROR_LOW, + error_type=depot.ERROR_T_DECAYABLE, +) one_mirror.set_megabytes(total_mb) @@ -182,10 +218,13 @@ def run(self): one_mirror = Scenario("origin and two mirrors") one_mirror.add_origin_uri("origin", depot.SPEED_FAST, depot.CSPEED_NEARBY) -one_mirror.add_repo_uri("mirror", depot.SPEED_SLIGHTLY_FASTER, - depot.CSPEED_LAN) -one_mirror.add_repo_uri("mirror2", depot.SPEED_VERY_FAST, depot.CSPEED_NEARBY, - speed_distribution=depot.MODAL_DECAY) +one_mirror.add_repo_uri("mirror", depot.SPEED_SLIGHTLY_FASTER, depot.CSPEED_LAN) +one_mirror.add_repo_uri( + "mirror2", + depot.SPEED_VERY_FAST, + depot.CSPEED_NEARBY, + speed_distribution=depot.MODAL_DECAY, +) one_mirror.set_megabytes(total_mb) @@ -199,8 +238,9 @@ def run(self): one_mirror.add_repo_uri("mirror", depot.SPEED_SLIGHTLY_FASTER, depot.CSPEED_LAN) one_mirror.add_repo_uri("mirror2", depot.SPEED_MEDIUM, depot.CSPEED_SLOW) one_mirror.add_repo_uri("mirror3", depot.SPEED_SLOW, depot.CSPEED_SLOW) -one_mirror.add_repo_uri("mirror4", depot.SPEED_VERY_SLOW, - depot.CSPEED_VERY_SLOW) +one_mirror.add_repo_uri( + "mirror4", depot.SPEED_VERY_SLOW, depot.CSPEED_VERY_SLOW +) one_mirror.add_repo_uri("mirror5", depot.SPEED_SLOW, depot.CSPEED_FARAWAY) one_mirror.set_megabytes(total_mb) @@ -211,11 +251,14 @@ def run(self): six_origin = Scenario("six origins") -six_origin.add_origin_uri("origin1", depot.SPEED_VERY_SLOW, - depot.CSPEED_VERY_SLOW) +six_origin.add_origin_uri( + "origin1", depot.SPEED_VERY_SLOW, depot.CSPEED_VERY_SLOW +) six_origin.add_origin_uri("origin2", depot.SPEED_SLOW, depot.CSPEED_FARAWAY) six_origin.add_origin_uri("origin3", depot.SPEED_MODERATE, depot.CSPEED_MEDIUM) -six_origin.add_origin_uri("origin4", depot.SPEED_SLIGHTLY_FASTER, depot.CSPEED_LAN) +six_origin.add_origin_uri( + "origin4", depot.SPEED_SLIGHTLY_FASTER, depot.CSPEED_LAN +) six_origin.add_origin_uri("origin5", depot.SPEED_MEDIUM, depot.CSPEED_SLOW) six_origin.add_origin_uri("origin6", depot.SPEED_FAST, depot.CSPEED_MEDIUM) @@ -224,4 +267,4 @@ def run(self): six_origin.run() # Vim hints -# vim:ts=8:sw=8:et:fdm=marker +# vim:ts=4:sw=4:et:fdm=marker
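
For context, a minimal sketch of how a new scenario would be assembled with the reformatted qual-simulator API. This is illustrative only and not part of the change above; it assumes placement at the bottom of src/util/qual-simulator/scenario.py, where depot, Scenario and total_mb are already in scope, and the "demo" name and title are invented for the example.

# Illustrative sketch only; it follows the pattern of the scenario
# definitions above and uses only names that already exist in scenario.py
# and depot.py (Scenario, the depot speed/connect-speed constants, total_mb).
demo = Scenario("illustrative: origin plus a faster, decaying mirror")
demo.add_origin_uri("origin", depot.SPEED_FAST, depot.CSPEED_NEARBY)
demo.add_repo_uri(
    "mirror",
    depot.SPEED_VERY_FAST,
    depot.CSPEED_LAN,
    speed_distribution=depot.MODAL_DECAY,
)
demo.set_megabytes(total_mb)

# Scenario.run() builds a stats.RepoChooser, ranks the configured URIs and
# keeps issuing simulated requests against the best-ranked repository until
# the configured number of bytes has been transferred.
demo.run()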