# Machine Learning & Statistics — Equation Reference

Author: Rubens Zimbres — rubenszmm@gmail.com — http://github.com/RubensZimbres

*(Reconstructed from a garbled text extraction; formulas below are restored to their standard forms. Items the extraction destroyed beyond recovery are marked as reconstructed.)*
## NAÏVE BAYES
$P(a \mid c) = \dfrac{P(c \mid a)\,P(a)}{P(c)}$

## BAYES OPTIMAL CLASSIFIER
$\arg\max_{x} \sum_{T} P(x \mid T)\,P(T \mid D)$

## NAÏVE BAYES CLASSIFIER
$\arg\max_{c}\; P(c) \prod_{i} P(a_i \mid c)$

## BAYES MAP (maximum a posteriori)
$h_{MAP} = \arg\max_{c}\; P(a \mid c)\,P(c)$

## MAXIMUM LIKELIHOOD
$h_{ML} = \arg\max_{c}\; P(a \mid c)$

## TOTAL PROBABILITY
$P(B) = \sum_i P(B \mid A_i)\,P(A_i)$

## MIXTURE MODELS
$P(B) = \sum_i P(B \mid A_i)\,P(A_i)$

## MIXTURE OF GAUSSIANS — ANOMALY DETECTION
$P(x) = \dfrac{1}{\sigma\sqrt{2\pi}}\, \exp\!\left(-\dfrac{(x-\mu)^2}{2\sigma^2}\right)$

$Z_{12} = \dfrac{N_1 C_1 + N_2 C_2}{N_1 + N_2}, \qquad P(Z_{12}) \to 0.50$

## EM ALGORITHM
E-step: $P(z \mid x) = \dfrac{P(x \mid z)\,P(z)}{\sum_{z'} P(x \mid z')\,P(z')}$

M-step: re-estimate the parameters, e.g. $P(z') = \dfrac{\sum_x P(z \mid x)}{n}$

E-step: assign responsibilities; M-step: e.g. $P(B=1 \mid A=1, C=0)$ (Bayesian-network parameter update)

## LAPLACE ESTIMATE (small samples)
$P(A) = \dfrac{A + 0.5}{A + B + 1}$

## BAYESIAN NETWORKS
Count tuples; $\neg$ for $y=0 \wedge y=1$ *(reconstructed — original garbled)*

## LIMITS
$\lim_{h \to 0} \dfrac{f(x+h) - f(x)}{h}, \qquad h = \Delta x = x' - x$

## DERIVATIVES
$\dfrac{\partial}{\partial x} x^n = n\,x^{n-1}$

$\dfrac{\partial y^n}{\partial x} = \dfrac{\partial y^n}{\partial y}\cdot\dfrac{\partial y}{\partial x}$

## PRODUCT RULE
$\dfrac{d}{dx}\big[f(x)\,g(x)\big] = f'(x)\,g(x) + f(x)\,g'(x)$

## QUOTIENT RULE
$\dfrac{d}{dx}\dfrac{f(x)}{g(x)} = \dfrac{f'(x)\,g(x) - f(x)\,g'(x)}{g(x)^2}$

## LINEARITY
$\dfrac{d}{dx}\,2f(x) = 2\,\dfrac{d}{dx}f(x)$

$\dfrac{d}{dx}\big[f(x) + g(x)\big] = \dfrac{d}{dx}f(x) + \dfrac{d}{dx}g(x)$

$\dfrac{d}{dx}\big[f(x) + 2g(x)\big] = \dfrac{d}{dx}f(x) + 2\,\dfrac{d}{dx}g(x)$

## CHAIN RULE
$\dfrac{d}{dx}\,g(f(x)) = g'(f(x))\cdot f'(x)$ — solve $f(x)$, apply in $g'(x)$

## VARIANCE
$Var = \dfrac{\sum (x - \bar{x})^2}{n-1}$

## STANDARD DEVIATION
$\sigma = \sqrt{Var}$

## COVARIANCE
$Cov = \dfrac{\sum (x - \bar{x})(y - \bar{y})}{n-1}$

## CONFIDENCE INTERVAL
$\bar{x} \pm 1.96\,\dfrac{\sigma}{\sqrt{n}}$

## CHI SQUARED
$\chi^2 = \sum \dfrac{(y - \hat{y})^2}{\hat{y}} = \sum \dfrac{\delta^2}{\hat{y}}$

## R SQUARED (via Pearson r)
$r = \dfrac{n\sum xy - \sum x \sum y}{\sqrt{n\sum x^2 - (\sum x)^2}\,\sqrt{n\sum y^2 - (\sum y)^2}}$

## LOSS
$Loss = Bias^2 + Variance + Noise$

## SUM OF SQUARED ERRORS
$E_w = \sum (y - \hat{y})^2$

## COST FUNCTION
$\theta_j := \theta_j - \eta\,\dfrac{\partial}{\partial \theta_j} \dfrac{\sum (y - \hat{y})^2}{2n}$

## NUMBER OF EXAMPLES (PAC bound)
$m \ge \dfrac{1}{\epsilon}\left(\ln|H| + \ln\dfrac{1}{\delta}\right)$, where $\epsilon$ = error tolerance and $\delta$ = failure probability *(reconstructed)*

## MARKOV CHAINS
$P_{t+1}(X = x) = \sum_{x'} P_t(X = x')\,T(x' \to x)$

## K NEAREST NEIGHBOR
$\hat{f}(x) \leftarrow \dfrac{\sum_{i=1}^{k} f(x_i)}{k}, \qquad D_E(x_i, x_j) = \sqrt{\sum (x_i - x_j)^2}$

## LINEAR REGRESSION (two predictors)
$m_1 = \dfrac{\sum x_2^2 \sum x_1 y - \sum x_1 x_2 \sum x_2 y}{\sum x_1^2 \sum x_2^2 - (\sum x_1 x_2)^2}$

$b = \bar{y} - m_1 \bar{x}_1 - m_2 \bar{x}_2$

## WEIGHTED NEAREST NEIGHBOR
$\hat{f}(x) = \dfrac{\sum_i w_i\, f(x_i)}{\sum_i w_i}, \qquad w_i = \dfrac{1}{D(x, x_i)^2}$

## PRINCIPAL COMPONENTS ANALYSIS
Center: $x' = x - \bar{x}$

Eigenvalues: solve $\det(A - \lambda I) = 0$

Eigenvectors: solve $(A - \lambda I)\,v = 0$

Projection: $f(x) = v^{\top} x$

$f(x) = \sum_{i=1}^{n} m_i x_i + b$

## LOGISTIC REGRESSION
$Odds\ Ratio:\ \log\dfrac{P}{1-P} = mx + b, \qquad \dfrac{P}{1-P} = e^{mx+b}$

$J(\theta) = -\dfrac{1}{n} \sum \Big[ y \log \hat{y} + (1-y)\log(1-\hat{y}) \Big]$, where $\hat{y} = \dfrac{1}{1 + e^{-(mx+b)}}$
For $y = 0 \wedge y = 1$: deviance $-2LL \to \chi^2$; $\bar{x}_1 \ne \bar{x}_2$ for separated groups *(reconstructed)*

$\log\dfrac{p}{1-p} = mx + b, \qquad Logit = 100 \cdot \log P(a \mid c)$

## DECISION TREES
$Entropy = -\sum_{i=1}^{n} P_i \log P_i$

$InfoGain = Entropy(parent) - \sum_k \dfrac{n_k}{n}\, Entropy(child_k)$

## RULE INDUCTION
$Gain = P \left[ -P_{after} \log P_{after} - (-P_{before} \log P_{before}) \right]$ *(reconstructed)*

## RULE VOTE
$Weight = accuracy \times coverage$

## ENTROPY
$H(A) = -\sum P(A)\,\log P(A)$

## JOINT ENTROPY
$H(A, B) = -\sum P(A, B)\,\log P(A, B)$

## CONDITIONAL ENTROPY
$H(A \mid B) = -\sum P(A, B)\,\log P(A \mid B)$

## MUTUAL INFORMATION
$I(A, B) = H(A) - H(A \mid B)$

## EIGENVECTOR CENTRALITY = PAGE RANK
$PR(A) = \dfrac{1-d}{n} + d \sum_{B} \dfrac{PR(B)}{Out(B)}$, where $d$ is the damping factor (original note: "$d=1$ few connections")

## RATING
$R = \bar{R}_u + \alpha \sum_v w_{uv} (R_{vi} - \bar{R}_v)$ *(reconstructed)*

## SIMILARITY (Pearson)
$w_{uv} = \dfrac{\sum_i (R_{ui} - \bar{R}_u)(R_{vi} - \bar{R}_v)}{\sqrt{\sum_i (R_{ui} - \bar{R}_u)^2}\,\sqrt{\sum_i (R_{vi} - \bar{R}_v)^2}}$

## BATCH GRADIENT DESCENT
$\theta_j := \theta_j - \eta\,\dfrac{\sum (\hat{y} - y)\,x}{n}$

## STOCHASTIC GRADIENT DESCENT
$\theta_j := \theta_j - \eta\,(\hat{y} - y)\,x$

## CONTENT-BASED RECOMMENDATION
$Rating = f(x) = w_0 + \sum_{i=1}^{n} w_i x_i$

## COLLABORATIVE FILTERING
$\hat{R}_{ui} = \bar{R}_u + \alpha \sum_v w_{uv}(R_{vi} - \bar{R}_v)$, with $w_{uv}$ the Pearson similarity above

## LOGIT
$\log(odds) = wx + b = \log\dfrac{p}{1-p}$

## SOFTMAX NORMALIZATION
$S(f(x))_i = \dfrac{e^{f_i(x)}}{\sum_j e^{f_j(x)}}$

## CROSS ENTROPY
$H(S, L) = -\sum_i L_i \log S_i$

## LOSS
$Loss = \dfrac{1}{N}\sum H\big(S(f(x)), L\big)$

## L2 REGULARIZATION
$w \leftarrow w - \eta\left(\delta\,x + \lambda\,w\right)$

## SIGMOID
$\sigma(x) = \dfrac{1}{1 + e^{-(wx+b)}}$

## RADIAL BASIS FUNCTION
$h(x) = e^{-\frac{(x-c)^2}{2\sigma^2}}$

## PERCEPTRON
$f(x) = sign\left(\sum_{i=1}^{n} w_i x_i\right)$

## PERCEPTRON TRAINING
$w_i \leftarrow w_i + \Delta w_i, \qquad \Delta w_i = \eta\,(t - o)\,x$

## ERROR FOR A SIGMOID
$\epsilon = (t - o)\,o\,(1 - o)\,x$

## AVOID OVERFIT — NEURAL NETWORKS L2
$E = \sum (t - o)^2 + F \sum w^2$, where $F$ = penalty

## BACKPROPAGATION
Output unit: $\delta_k = o_k (1 - o_k)(t_k - o_k)$

Hidden unit: $\delta_h = o_h (1 - o_h) \sum_k w_{kh}\,\delta_k$

Update: $w_{ji} \leftarrow w_{ji} + \eta\,\delta_j\,x_{ji}$

With momentum: $\Delta w_{ji}(n) = \eta\,\delta_j\,x_{ji} + M\,\Delta w_{ji}(n-1)$, where $M$ = momentum

## NEURAL NETWORKS COST FUNCTION
$J = -\dfrac{1}{N}\sum_{i=1}^{N}\sum_{k} \big[ t_k \log o_k + (1 - t_k)\log(1 - o_k) \big] + \dfrac{\lambda}{2N}\sum w^2$

## MOMENTUM
$v_t = \gamma\,v_{t-1} + \eta\,\nabla J(\theta), \qquad \theta = \theta - v_t$

## NESTEROV
$\theta = \theta - \big(\gamma\,v_{t-1} + \eta\,\nabla J(\theta - \gamma\,v_{t-1})\big)$

## ADAGRAD
$\theta = \theta - \dfrac{\eta}{\sqrt{\sum g^2} + \epsilon}\,\nabla J(\theta)$

## ADADELTA
$\theta = \theta - \dfrac{RMS[\Delta\theta]_{t-1}}{RMS[g]_t}\,\nabla J(\theta), \qquad RMS[\Delta\theta] = \sqrt{E[\Delta\theta^2] + \epsilon}$

## RMSprop
$\theta = \theta - \dfrac{\eta}{\sqrt{E[g^2] + \epsilon}}\,\nabla J(\theta)$

## ADAM
$\theta = \theta - \dfrac{\eta}{\sqrt{\hat{v}} + \epsilon}\,\hat{m}$

$\hat{m} = \dfrac{\beta_1 m_{t-1} + (1 - \beta_1)\nabla J(\theta)}{1 - \beta_1^t}, \qquad \hat{v} = \dfrac{\beta_2 v_{t-1} + (1 - \beta_2)\nabla J(\theta)^2}{1 - \beta_2^t}$
## SUPPORT VECTOR MACHINES
$f(x) = sign\left(\sum_i \lambda_i\,y_i\,K(x_i \cdot x) + b\right)$

RBF kernel: $K(x_i \cdot x_j) = \exp\!\left(-\dfrac{\|x_i - x_j\|^2}{2\sigma^2}\right)$

Euclidean distance: $\|x_i - x_j\| = \sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}$

$DotProduct = \|x_1\|\,\|x_2\|\cos\theta, \qquad \cos^2\theta + \sin^2\theta = 1, \qquad \|x\| = \sqrt{x_1^2 + y_1^2}$

Maximize margin: $width = \dfrac{2}{\|w\|}$; $\lambda \to \arg\min$, $\nabla L = 0$; labels $y \in \{+1, -1\}$

## SUPPORT VECTOR REGRESSION
$Y = \sum_i \lambda_i\,K(x_i \cdot x) + b$

## RIDGE REGRESSION — REGULARIZATION
$m := m - \eta\left(\dfrac{\partial}{\partial m}\dfrac{\sum(y - \hat{y})^2}{N} + \dfrac{\lambda\,m}{N}\right), \qquad \hat{y} = mx + b$ with penalty $\dfrac{\lambda \sum m^2}{N}$

## LASSO REGRESSION — REGULARIZATION
Penalty $\dfrac{\lambda \sum |m|}{N}$; drives $m \to 0$

## NORMALITY & NON-PARAMETRIC TESTS
Skewness: $|Skewness| < 1$; Kolmogorov–Smirnov: normal if sig $> .05$; t-test assumes normality; Mann–Whitney U (non-parametric): sig $< .05$; Cronbach's $\alpha > .60\text{–}.70$

## ARITHMETIC MEAN
$\bar{x} = \dfrac{\sum x}{N}$

## GEOMETRIC MEAN
$\sqrt[3]{1 \cdot 2 \cdot 4}$ (for the values 1, 2, 4)

## MEDIAN / RANGE
$Range = Max - Min$

## t TEST
$t = \dfrac{(\bar{x}_1 - \bar{x}_2) - (\mu_1 - \mu_2)}{\sqrt{\dfrac{s_1^2}{n_1} + \dfrac{s_2^2}{n_2}}}$ — significant difference when sig $< .05$

## t TEST, 2 SAMPLES
Levene's test for equality of variances

## ANOVA (3+ groups)
$F = \dfrac{\text{variance between groups}}{\text{variance within groups}}$, sig $< .05$

## TOLERANCE
$Tolerance > .1, \qquad Tolerance = \dfrac{1}{VIF}$

## VARIANCE INFLATION FACTOR
VIF threshold *(original garbled — "VIF 15%"; commonly VIF < 10)*

## DISCRIMINANT ANALYSIS
Box's M: sig $< .05$ rejects $H_0$; Wilks' Lambda: sig $< .05$; groups differ when $\bar{x}_1 \ne \bar{x}_2$

$P(x) = \dfrac{1}{\sigma\sqrt{2\pi}}\exp\!\left(-\dfrac{(x-\mu)^2}{2\sigma^2}\right), \qquad Z_{12} = \dfrac{N_1 C_1 + N_2 C_2}{N_1 + N_2}$

## ERROR MARGIN
$1.96\,\dfrac{\sigma}{\sqrt{N}}$

## ACCURACY
Confidence interval $\sim$ p-value

## HYPOTHESES TESTING
p-value $< .05$

## TRANSFORMATION
OK when $\bar{x} < \sigma$ *(reconstructed)*

## MULTICOLLINEARITY
Correlation $> .90$; Tolerance $< .1$

## SUM OF SQUARES (explained)
$F = \dfrac{SS_{regression} / (coef - 1)}{SS_{residual} / (N - coef)}$

## STANDARD ERROR ESTIMATE (SEE)
$SEE = \sqrt{\dfrac{\sum (y - \hat{y})^2}{n - 2}}$

## MAHALANOBIS DISTANCE
Distance scaled by variance of the same variable: $D = \sqrt{\dfrac{(x_1 - x_2)^2}{\sigma^2}}$ *(reconstructed)*

## NET PRESENT VALUE
$P_t = P_0\,\theta^{t}, \qquad P_0 = P_t\,\theta^{-t}$ *(reconstructed — $\theta$ = discount factor)*

## MARKOV DECISION PROCESS
$U(s) = R(s) + \delta \max_a \sum_{s'} T(s, a, s')\,U(s')$

$\pi^{*} = \arg\max_a \sum_{s'} T(s, a, s')\,U(s')$

Q-learning: $Q(s, a) \leftarrow R(s) + \delta \max_{a'} Q(s', a')$

## MANHATTAN DISTANCE (L1)
$Manh = |x_1 - x_2| + |y_1 - y_2|$

## PROBABILIDADE (coins)
$P(a) = \dfrac{n(a)}{n(A)}$

## FREQUENTISTA
$\lim_{n \to \infty} \dfrac{m}{n} = \dfrac{\text{sucessos}}{\text{todas as tentativas}}$

## AXIOMÁTICA
$0 \le P(A) \le 1$; e.g. joint $P(A, B, C)$ *(reconstructed)*

## TEOREMAS DE PROBABILIDADE — UNIÃO (A ou B)
Mutuamente exclusivos: $P(A \cup B) = P(A) + P(B)$

Não mutuamente exclusivos: (continued below)
$P(A \cup B) = P(A) + P(B) - P(A \cap B)$

$P(A \cup B \cup C) = P(A) + P(B) + P(C) - P(A \cap B) - P(A \cap C) - P(B \cap C) + P(A \cap B \cap C)$

## EVENTO COMPLEMENTAR
$P(\bar{A}) = 1 - P(A)$

## PROBABILIDADE MARGINAL
$P(a) = \dfrac{n(A = a)}{n(A)}$

## PROBABILIDADE A e B
$P(A \mid B) = \dfrac{P(A \cap B)}{P(B)}$

## PROBABILIDADE CONDICIONAL
Independentes: $P(A \mid B) = P(A)$

## BAYES (52 cartas, câncer)
$P(A \mid B) = \dfrac{P(A \cap B)}{P(B)} = \dfrac{P(B \mid A)\,P(A)}{P(B)}$

## BINOMIAL DISTRIBUTION (0/1, sucesso)
$P(\text{fracasso}) = (1 - P(s))^{f}, \qquad P(\text{sucesso}) = P(s)^{s}$

$P(S\ \text{sucessos}) = \sum_{s \in S} P(s)$ *(reconstructed)*

## PROBABILITY k SUCCESS in n TRIALS
$P(k \text{ in } n) = \binom{n}{k}\,p^{k}\,(1-p)^{n-k}$

## PROBABILIDADE TOTAL (urnas)
$P(B) = \sum_i P(A_i \cap B) = \sum_i P(A_i)\,P(B \mid A_i)$

## INTEGRAIS
$\int_a^b f(x)\,dx = F(b) - F(a)$, e.g. $\int_1^2 x^2\,dx = \left.\dfrac{x^3}{3}\right|_1^2 = \dfrac{2^3 - 1^3}{3}$

Constant rule: $\int c\,f'(x)\,dx = c \int f'(x)\,dx$

Sum rule: $\int \big[f(x) + g(x)\big]\,dx = \int f(x)\,dx + \int g(x)\,dx$

## INTEGRATION (Riemann sum)
$\int f'(x)\,dx = \lim_{N \to \infty} \sum f'(x)\,\Delta x$

## DIFFERENTIATION
$\lim_{\Delta x \to 0} \dfrac{f(a + \Delta x) - f(a)}{\Delta x}$

## LINEAR ALGEBRA
**Addition:** element-wise, same dimensions.

**Scalar multiply:** e.g. $3 \cdot \begin{bmatrix}2\\5\end{bmatrix} = \begin{bmatrix}6\\15\end{bmatrix}$

**Matrix × vector:** columns of $A$ = rows of $B$; $A_{m,n} \cdot B_{n,1} = C_{m,1}$; each entry is a row-by-column dot product.

**Matrix × matrix:** columns of $A$ = rows of $B$; result has rows of $A$ × columns of $B$; entry $C_{2,3}$ = (2nd row of $A$) · (3rd column of $B$).

**Permutation:** multiply on the LEFT to exchange rows ($\begin{bmatrix}0&1\\1&0\end{bmatrix}\begin{bmatrix}a&b\\c&d\end{bmatrix} = \begin{bmatrix}c&d\\a&b\end{bmatrix}$); on the RIGHT to exchange columns ($\begin{bmatrix}a&b\\c&d\end{bmatrix}\begin{bmatrix}0&1\\1&0\end{bmatrix} = \begin{bmatrix}b&a\\d&c\end{bmatrix}$).

**Identity:** $I = \begin{bmatrix}1&0\\0&1\end{bmatrix}$; **Diagonal:** nonzero entries only on the main diagonal.

**Transpose:** $(A^{\top})_{ij} = A_{ji}$

**Properties:** not commutative ($AB \ne BA$); associative ($(AB)C = A(BC)$); inverse (square matrices only): $A^{-1} \ne A$, $A^{-1}A = I$.

**Determinant:** $2\times 2$: $\det = ad - bc$, e.g. $1\cdot 2 - 3\cdot 4 = -10$; $3\times 3$ (Sarrus): $\det = 1\cdot5\cdot9 + 4\cdot8\cdot3 + 7\cdot2\cdot6 - 7\cdot5\cdot3 - 1\cdot8\cdot6 - 4\cdot2\cdot9$

## ELASTICIDADE DE DEMANDA (midpoint formula)
$\rho = \dfrac{(Q_2 - Q_1)/(Q_2 + Q_1)}{(P_2 - P_1)/(P_2 + P_1)}$